/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */
extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static struct sx sw_alloc_sx;

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
				      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct bio *));

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
static void flushchainbuf(struct buf *nbp);
static void waitchainbuf(struct bio *bp, int count, int done);

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */
static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */
static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
swp_sizecheck()
{
	GIANT_REQUIRED;

	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
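
/*
 * Worked example of the hysteresis above (illustrative only, not part of
 * the original code): with the default nswap_lowat = 128 and
 * nswap_hiwat = 512, the first time vm_swap_size drops below 128 pages we
 * warn once and set swap_pager_almost_full; the flag is not cleared until
 * vm_swap_size climbs back above 512 pages, so a workload hovering around
 * the low-water mark does not spam the console with warnings.
 */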

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init()
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);
	mtx_init(&sw_alloc_mtx, "swap_pager list", MTX_DEF);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
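
/*
 * Illustrative numbers (assuming the default MAX_PAGEOUT_CLUSTER of 16):
 * SWB_NPAGES = 16, so dmmax = 32 pages and dmmax_mask = ~31.  Two swap
 * block numbers b1 and b2 lie in the same interleave stripe exactly when
 * ((b1 ^ b2) & dmmax_mask) == 0; this is the test used below by
 * swap_pager_strategy() and swap_pager_getpages() to avoid building an
 * I/O that crosses a stripe (and therefore possibly a device) boundary.
 */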

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init()
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 70MB by default.
	 */
	n = cnt.v_page_count;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;
	swap_zone = zinit(
	    "SWAPMETA",
	    sizeof(struct swblock),
	    n,
	    ZONE_INTERRUPT,
	    1);
	do {
		if (uma_zone_set_obj(swap_zone, NULL, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (swap_zone == NULL)
		panic("failed to zinit swap_zone.");
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
}
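
/*
 * Sizing example (illustrative only): on a machine with 262144 managed
 * pages and no maxswzone override, n2 = 262144 swblock entries are
 * reserved, and the loop above picks the smallest power of two that is
 * >= n2 / 8, i.e. n = 32768 hash buckets with swhash_mask = 0x7fff.  If
 * the zone allocation had to back off (the two-thirds retry loop), n2 is
 * the reduced entry count and the hash table shrinks accordingly.
 */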

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	GIANT_REQUIRED;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}
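
/*
 * Note on the named lookup above (illustrative only): a named handle is
 * hashed onto one of the NOBJLISTS (8) pager lists by the NOBJLIST()
 * macro, which simply shifts the handle pointer right by 4 and masks with
 * NOBJLISTS-1.  Repeated lookups of the same handle therefore only walk a
 * fraction of the named objects rather than one long global list.
 */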

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	int s;

	GIANT_REQUIRED;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	mtx_lock(&sw_alloc_mtx);
	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}
	mtx_unlock(&sw_alloc_mtx);

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(npages)
	int npages;
{
	daddr_t blk;

	GIANT_REQUIRED;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		/* per-swap area stats */
		swdevt[BLK2DEVIDX(blk)].sw_used += npages;
		swp_sizecheck();
	}
	return (blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(blk, npages)
	daddr_t blk;
	int npages;
{
	GIANT_REQUIRED;

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	/* per-swap area stats */
	swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	int s = splvm();

	GIANT_REQUIRED;
	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					splx(s);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	splx(s);
	return (0);
}
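
/*
 * Usage sketch (hypothetical caller, not part of this file): a subsystem
 * that wants guaranteed backing store for a swap-backed object can
 * pre-assign blocks before dirtying any pages, e.g.:
 *
 *	if (swap_pager_reserve(object, 0, OFF_TO_IDX(size)) < 0)
 *		return (ENOMEM);
 *
 * On failure the routine has already freed whatever it managed to assign,
 * so no cleanup beyond the error return is required.
 */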

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
	vm_object_t srcobject;
	vm_object_t dstobject;
	vm_pindex_t offset;
	int destroysource;
{
	vm_pindex_t i;
	int s;

	GIANT_REQUIRED;

	s = splvm();
	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		mtx_lock(&sw_alloc_mtx);
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
		mtx_unlock(&sw_alloc_mtx);
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */
			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	daddr_t blk0;
	int s;

	/*
	 * do we have good backing store at the requested index ?
	 */
	s = splvm();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		splx(s);
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	splx(s);
	return (TRUE);
}
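
/*
 * Example (illustrative only): with SWB_NPAGES = 16 the loops above scan
 * at most 7 pages in each direction.  If pindex has swap block blk0 and
 * the object also has blk0-2..blk0-1 and blk0+1..blk0+4 assigned
 * contiguously, haspage reports *before = 2 and *after = 4, which callers
 * such as the fault path can use to size the read-behind/read-ahead
 * cluster they hand to swap_pager_getpages().
 */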

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(m)
	vm_page_t m;
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bp)
{
	vm_pindex_t start;
	int count;
	int s;
	char *data;
	struct buf *nbp = NULL;

	GIANT_REQUIRED;

	/* XXX: KASSERT instead ? */
	if (bp->bio_bcount & PAGE_MASK) {
		biofinish(bp, NULL, EINVAL);
		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->bio_error = 0;
	bp->bio_flags &= ~BIO_ERROR;
	bp->bio_resid = bp->bio_bcount;
	*(u_int *) &bp->bio_driver1 = 0;

	start = bp->bio_pblkno;
	count = howmany(bp->bio_bcount, PAGE_SIZE);
	data = bp->bio_data;

	s = splvm();

	/*
	 * Deal with BIO_DELETE
	 */
	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->bio_resid = 0;
		biodone(bp);
		return;
	}

	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->bio_error = ENOMEM;
				bp->bio_flags |= BIO_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			splx(s);
			if (bp->bio_cmd == BIO_READ) {
				++cnt.v_swapin;
				cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++cnt.v_swapout;
				cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			s = splvm();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->bio_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */
	splx(s);

	if (nbp) {
		if (nbp->b_iocmd == BIO_READ) {
			++cnt.v_swapin;
			cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++cnt.v_swapout;
			cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
		/* nbp = NULL; */
	}
	/*
	 * Wait for completion.
	 */
	waitchainbuf(bp, 0, 1);
}
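
/*
 * Usage sketch (hypothetical, for illustration only): a caller that wants
 * to read one page of backing store directly through this interface would
 * build a bio roughly as follows and hand it to vm_pager_strategy():
 *
 *	bp->bio_cmd    = BIO_READ;
 *	bp->bio_pblkno = pindex;	-- page-sized block index in object
 *	bp->bio_bcount = PAGE_SIZE;	-- must be a multiple of PAGE_SIZE
 *	bp->bio_data   = buffer;
 *	vm_pager_strategy(object, bp);
 *
 * BIO_DELETE frees the underlying swap for the range instead of moving
 * any data, as handled above.
 */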
We try to load in as large 9831c7c3c6aSMatthew Dillon * a chunk surrounding m[reqpage] as is contiguous in swap and which 9841c7c3c6aSMatthew Dillon * belongs to the same object. 9851c7c3c6aSMatthew Dillon * 9861c7c3c6aSMatthew Dillon * The code is designed for asynchronous operation and 9871c7c3c6aSMatthew Dillon * immediate-notification of 'reqpage' but tends not to be 9881c7c3c6aSMatthew Dillon * used that way. Please do not optimize-out this algorithmic 9891c7c3c6aSMatthew Dillon * feature, I intend to improve on it in the future. 9901c7c3c6aSMatthew Dillon * 9911c7c3c6aSMatthew Dillon * The parent has a single vm_object_pip_add() reference prior to 9921c7c3c6aSMatthew Dillon * calling us and we should return with the same. 9931c7c3c6aSMatthew Dillon * 9941c7c3c6aSMatthew Dillon * The parent has BUSY'd the pages. We should return with 'm' 9951c7c3c6aSMatthew Dillon * left busy, but the others adjusted. 9961c7c3c6aSMatthew Dillon */ 997f708ef1bSPoul-Henning Kamp static int 99824a1cce3SDavid Greenman swap_pager_getpages(object, m, count, reqpage) 99924a1cce3SDavid Greenman vm_object_t object; 100026f9a767SRodney W. Grimes vm_page_t *m; 100126f9a767SRodney W. Grimes int count, reqpage; 1002df8bae1dSRodney W. Grimes { 10031c7c3c6aSMatthew Dillon struct buf *bp; 10041c7c3c6aSMatthew Dillon vm_page_t mreq; 10051c7c3c6aSMatthew Dillon int s; 100626f9a767SRodney W. Grimes int i; 100726f9a767SRodney W. Grimes int j; 10081c7c3c6aSMatthew Dillon daddr_t blk; 10091c7c3c6aSMatthew Dillon vm_offset_t kva; 10101c7c3c6aSMatthew Dillon vm_pindex_t lastpindex; 10110d94caffSDavid Greenman 10120cddd8f0SMatthew Dillon GIANT_REQUIRED; 10130cddd8f0SMatthew Dillon 10141c7c3c6aSMatthew Dillon mreq = m[reqpage]; 10151c7c3c6aSMatthew Dillon 10161c7c3c6aSMatthew Dillon if (mreq->object != object) { 10171c7c3c6aSMatthew Dillon panic("swap_pager_getpages: object mismatch %p/%p", 10181c7c3c6aSMatthew Dillon object, 10191c7c3c6aSMatthew Dillon mreq->object 10201c7c3c6aSMatthew Dillon ); 102126f9a767SRodney W. Grimes } 10221c7c3c6aSMatthew Dillon /* 10231c7c3c6aSMatthew Dillon * Calculate range to retrieve. The pages have already been assigned 10241c7c3c6aSMatthew Dillon * their swapblks. We require a *contiguous* range that falls entirely 10251c7c3c6aSMatthew Dillon * within a single device stripe. If we do not supply it, bad things 10264dcc5c2dSMatthew Dillon * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 10274dcc5c2dSMatthew Dillon * loops are set up such that the case(s) are handled implicitly. 10284dcc5c2dSMatthew Dillon * 10294dcc5c2dSMatthew Dillon * The swp_*() calls must be made at splvm(). vm_page_free() does 10304dcc5c2dSMatthew Dillon * not need to be, but it will go a little faster if it is. 10311c7c3c6aSMatthew Dillon */ 10324dcc5c2dSMatthew Dillon s = splvm(); 10331c7c3c6aSMatthew Dillon blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 10341c7c3c6aSMatthew Dillon 10351c7c3c6aSMatthew Dillon for (i = reqpage - 1; i >= 0; --i) { 10361c7c3c6aSMatthew Dillon daddr_t iblk; 10371c7c3c6aSMatthew Dillon 10381c7c3c6aSMatthew Dillon iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0); 10391c7c3c6aSMatthew Dillon if (blk != iblk + (reqpage - i)) 104026f9a767SRodney W. Grimes break; 10414dcc5c2dSMatthew Dillon if ((blk ^ iblk) & dmmax_mask) 10424dcc5c2dSMatthew Dillon break; 104326f9a767SRodney W. 
Grimes } 10441c7c3c6aSMatthew Dillon ++i; 10451c7c3c6aSMatthew Dillon 10461c7c3c6aSMatthew Dillon for (j = reqpage + 1; j < count; ++j) { 10471c7c3c6aSMatthew Dillon daddr_t jblk; 10481c7c3c6aSMatthew Dillon 10491c7c3c6aSMatthew Dillon jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0); 10501c7c3c6aSMatthew Dillon if (blk != jblk - (j - reqpage)) 10511c7c3c6aSMatthew Dillon break; 10524dcc5c2dSMatthew Dillon if ((blk ^ jblk) & dmmax_mask) 10534dcc5c2dSMatthew Dillon break; 10541c7c3c6aSMatthew Dillon } 10551c7c3c6aSMatthew Dillon 10561c7c3c6aSMatthew Dillon /* 10571c7c3c6aSMatthew Dillon * free pages outside our collection range. Note: we never free 10581c7c3c6aSMatthew Dillon * mreq, it must remain busy throughout. 10591c7c3c6aSMatthew Dillon */ 10601c7c3c6aSMatthew Dillon { 10611c7c3c6aSMatthew Dillon int k; 10621c7c3c6aSMatthew Dillon 10634dcc5c2dSMatthew Dillon for (k = 0; k < i; ++k) 10644dcc5c2dSMatthew Dillon vm_page_free(m[k]); 10654dcc5c2dSMatthew Dillon for (k = j; k < count; ++k) 10661c7c3c6aSMatthew Dillon vm_page_free(m[k]); 10671c7c3c6aSMatthew Dillon } 10684dcc5c2dSMatthew Dillon splx(s); 10694dcc5c2dSMatthew Dillon 10701c7c3c6aSMatthew Dillon 10711c7c3c6aSMatthew Dillon /* 10724dcc5c2dSMatthew Dillon * Return VM_PAGER_FAIL if we have nothing to do. Return mreq 10734dcc5c2dSMatthew Dillon * still busy, but the others unbusied. 10741c7c3c6aSMatthew Dillon */ 10754dcc5c2dSMatthew Dillon if (blk == SWAPBLK_NONE) 107626f9a767SRodney W. Grimes return (VM_PAGER_FAIL); 1077df8bae1dSRodney W. Grimes 107816f62314SDavid Greenman /* 107916f62314SDavid Greenman * Get a swap buffer header to perform the IO 108016f62314SDavid Greenman */ 10811c7c3c6aSMatthew Dillon bp = getpbuf(&nsw_rcount); 108216f62314SDavid Greenman kva = (vm_offset_t) bp->b_data; 108326f9a767SRodney W. Grimes 108416f62314SDavid Greenman /* 108516f62314SDavid Greenman * map our page(s) into kva for input 10861c7c3c6aSMatthew Dillon * 10871c7c3c6aSMatthew Dillon * NOTE: B_PAGING is set by pbgetvp() 108816f62314SDavid Greenman */ 10891c7c3c6aSMatthew Dillon pmap_qenter(kva, m + i, j - i); 10901c7c3c6aSMatthew Dillon 109121144e3bSPoul-Henning Kamp bp->b_iocmd = BIO_READ; 10921c7c3c6aSMatthew Dillon bp->b_iodone = swp_pager_async_iodone; 1093fdcc1cc0SJohn Baldwin bp->b_rcred = crhold(thread0.td_ucred); 1094fdcc1cc0SJohn Baldwin bp->b_wcred = crhold(thread0.td_ucred); 1095a5296b05SJulian Elischer bp->b_data = (caddr_t) kva; 10961c7c3c6aSMatthew Dillon bp->b_blkno = blk - (reqpage - i); 10971c7c3c6aSMatthew Dillon bp->b_bcount = PAGE_SIZE * (j - i); 10981c7c3c6aSMatthew Dillon bp->b_bufsize = PAGE_SIZE * (j - i); 10991c7c3c6aSMatthew Dillon bp->b_pager.pg_reqpage = reqpage - i; 11001c7c3c6aSMatthew Dillon 11011c7c3c6aSMatthew Dillon { 11021c7c3c6aSMatthew Dillon int k; 11031c7c3c6aSMatthew Dillon 11041c7c3c6aSMatthew Dillon for (k = i; k < j; ++k) { 11051c7c3c6aSMatthew Dillon bp->b_pages[k - i] = m[k]; 11061c7c3c6aSMatthew Dillon vm_page_flag_set(m[k], PG_SWAPINPROG); 11071c7c3c6aSMatthew Dillon } 11081c7c3c6aSMatthew Dillon } 11091c7c3c6aSMatthew Dillon bp->b_npages = j - i; 111026f9a767SRodney W. Grimes 11110d94caffSDavid Greenman pbgetvp(swapdev_vp, bp); 1112df8bae1dSRodney W. Grimes 1113976e77fcSDavid Greenman cnt.v_swapin++; 11141c7c3c6aSMatthew Dillon cnt.v_swappgsin += bp->b_npages; 11151c7c3c6aSMatthew Dillon 1116df8bae1dSRodney W. Grimes /* 11171c7c3c6aSMatthew Dillon * We still hold the lock on mreq, and our automatic completion routine 11181c7c3c6aSMatthew Dillon * does not remove it. 1119df8bae1dSRodney W. 
Grimes */ 11201c7c3c6aSMatthew Dillon vm_object_pip_add(mreq->object, bp->b_npages); 11211c7c3c6aSMatthew Dillon lastpindex = m[j-1]->pindex; 11221c7c3c6aSMatthew Dillon 11231c7c3c6aSMatthew Dillon /* 11241c7c3c6aSMatthew Dillon * perform the I/O. NOTE!!! bp cannot be considered valid after 11251c7c3c6aSMatthew Dillon * this point because we automatically release it on completion. 11261c7c3c6aSMatthew Dillon * Instead, we look at the one page we are interested in which we 11271c7c3c6aSMatthew Dillon * still hold a lock on even through the I/O completion. 11281c7c3c6aSMatthew Dillon * 11291c7c3c6aSMatthew Dillon * The other pages in our m[] array are also released on completion, 11301c7c3c6aSMatthew Dillon * so we cannot assume they are valid anymore either. 11311c7c3c6aSMatthew Dillon * 1132ea3aecf5SPeter Wemm * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 11331c7c3c6aSMatthew Dillon */ 1134b890cb2cSPeter Wemm BUF_KERNPROC(bp); 1135b99c307aSPoul-Henning Kamp BUF_STRATEGY(bp); 113626f9a767SRodney W. Grimes 113726f9a767SRodney W. Grimes /* 11381c7c3c6aSMatthew Dillon * wait for the page we want to complete. PG_SWAPINPROG is always 11391c7c3c6aSMatthew Dillon * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 11401c7c3c6aSMatthew Dillon * is set in the meta-data. 114126f9a767SRodney W. Grimes */ 11421c7c3c6aSMatthew Dillon s = splvm(); 11431c7c3c6aSMatthew Dillon while ((mreq->flags & PG_SWAPINPROG) != 0) { 11441c7c3c6aSMatthew Dillon vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED); 11451c7c3c6aSMatthew Dillon cnt.v_intrans++; 11460cddd8f0SMatthew Dillon if (tsleep(mreq, PSWP, "swread", hz*20)) { 1147ac1e407bSBruce Evans printf( 11481c7c3c6aSMatthew Dillon "swap_pager: indefinite wait buffer: device:" 1149af647ddeSBruce Evans " %s, blkno: %ld, size: %ld\n", 1150af647ddeSBruce Evans devtoname(bp->b_dev), (long)bp->b_blkno, 1151af647ddeSBruce Evans bp->b_bcount 11521c7c3c6aSMatthew Dillon ); 11531c7c3c6aSMatthew Dillon } 11541b119d9dSDavid Greenman } 1155df8bae1dSRodney W. Grimes splx(s); 115626f9a767SRodney W. Grimes 115726f9a767SRodney W. Grimes /* 1158a1287949SEivind Eklund * mreq is left busied after completion, but all the other pages 11591c7c3c6aSMatthew Dillon * are freed. If we had an unrecoverable read error the page will 11601c7c3c6aSMatthew Dillon * not be valid. 116126f9a767SRodney W. Grimes */ 11621c7c3c6aSMatthew Dillon if (mreq->valid != VM_PAGE_BITS_ALL) { 11631c7c3c6aSMatthew Dillon return (VM_PAGER_ERROR); 116426f9a767SRodney W. Grimes } else { 11651c7c3c6aSMatthew Dillon return (VM_PAGER_OK); 116626f9a767SRodney W. Grimes } 11671c7c3c6aSMatthew Dillon 11681c7c3c6aSMatthew Dillon /* 11691c7c3c6aSMatthew Dillon * A final note: in a low swap situation, we cannot deallocate swap 11701c7c3c6aSMatthew Dillon * and mark a page dirty here because the caller is likely to mark 11711c7c3c6aSMatthew Dillon * the page clean when we return, causing the page to possibly revert 11721c7c3c6aSMatthew Dillon * to all-zero's later. 11731c7c3c6aSMatthew Dillon */ 1174df8bae1dSRodney W. Grimes } 1175df8bae1dSRodney W. Grimes 11761c7c3c6aSMatthew Dillon /* 11771c7c3c6aSMatthew Dillon * swap_pager_putpages: 11781c7c3c6aSMatthew Dillon * 11791c7c3c6aSMatthew Dillon * Assign swap (if necessary) and initiate I/O on the specified pages. 11801c7c3c6aSMatthew Dillon * 11811c7c3c6aSMatthew Dillon * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 11821c7c3c6aSMatthew Dillon * are automatically converted to SWAP objects. 
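 *
 *	Editor's illustrative sketch, not part of the original source: the
 *	contract spelled out below means a caller that has already
 *	soft-busied the pages and added pip references for them can clean
 *	up after us roughly like this:
 *
 *		int rtvals[MAX_PAGEOUT_CLUSTER], i;
 *
 *		swap_pager_putpages(object, m, count, FALSE, rtvals);
 *		for (i = 0; i < count; i++) {
 *			if (rtvals[i] != VM_PAGER_PEND) {
 *				vm_object_pip_wakeup(object);
 *				vm_page_io_finish(m[i]);
 *			}
 *		}
 *
 *	Pages whose slots read VM_PAGER_PEND are unbusied, and their pip
 *	references dropped, later by swp_pager_async_iodone().  The real
 *	caller-side bookkeeping lives in the pageout code; the fragment
 *	above only sketches the interface described in this comment.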
11831c7c3c6aSMatthew Dillon * 1184ea3aecf5SPeter Wemm * In a low memory situation we may block in VOP_STRATEGY(), but the new 11851c7c3c6aSMatthew Dillon * vm_page reservation system coupled with properly written VFS devices 11861c7c3c6aSMatthew Dillon * should ensure that no low-memory deadlock occurs. This is an area 11871c7c3c6aSMatthew Dillon * which needs work. 11881c7c3c6aSMatthew Dillon * 11891c7c3c6aSMatthew Dillon * The parent has N vm_object_pip_add() references prior to 11901c7c3c6aSMatthew Dillon * calling us and will remove references for rtvals[] that are 11911c7c3c6aSMatthew Dillon * not set to VM_PAGER_PEND. We need to remove the rest on I/O 11921c7c3c6aSMatthew Dillon * completion. 11931c7c3c6aSMatthew Dillon * 11941c7c3c6aSMatthew Dillon * The parent has soft-busy'd the pages it passes us and will unbusy 11951c7c3c6aSMatthew Dillon * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 11961c7c3c6aSMatthew Dillon * We need to unbusy the rest on I/O completion. 11971c7c3c6aSMatthew Dillon */ 1198e4542174SMatthew Dillon void 119924a1cce3SDavid Greenman swap_pager_putpages(object, m, count, sync, rtvals) 120024a1cce3SDavid Greenman vm_object_t object; 120126f9a767SRodney W. Grimes vm_page_t *m; 120226f9a767SRodney W. Grimes int count; 120324a1cce3SDavid Greenman boolean_t sync; 120426f9a767SRodney W. Grimes int *rtvals; 1205df8bae1dSRodney W. Grimes { 12061c7c3c6aSMatthew Dillon int i; 12071c7c3c6aSMatthew Dillon int n = 0; 1208df8bae1dSRodney W. Grimes 12090cddd8f0SMatthew Dillon GIANT_REQUIRED; 12101c7c3c6aSMatthew Dillon if (count && m[0]->object != object) { 12111c7c3c6aSMatthew Dillon panic("swap_pager_getpages: object mismatch %p/%p", 12121c7c3c6aSMatthew Dillon object, 12131c7c3c6aSMatthew Dillon m[0]->object 12141c7c3c6aSMatthew Dillon ); 12151c7c3c6aSMatthew Dillon } 12161c7c3c6aSMatthew Dillon /* 12171c7c3c6aSMatthew Dillon * Step 1 12181c7c3c6aSMatthew Dillon * 12191c7c3c6aSMatthew Dillon * Turn object into OBJT_SWAP 12201c7c3c6aSMatthew Dillon * check for bogus sysops 12211c7c3c6aSMatthew Dillon * force sync if not pageout process 12221c7c3c6aSMatthew Dillon */ 12234dcc5c2dSMatthew Dillon if (object->type != OBJT_SWAP) 12244dcc5c2dSMatthew Dillon swp_pager_meta_build(object, 0, SWAPBLK_NONE); 1225e47ed70bSJohn Dyson 1226e47ed70bSJohn Dyson if (curproc != pageproc) 1227e47ed70bSJohn Dyson sync = TRUE; 122826f9a767SRodney W. Grimes 12291c7c3c6aSMatthew Dillon /* 12301c7c3c6aSMatthew Dillon * Step 2 12311c7c3c6aSMatthew Dillon * 1232ad3cce20SMatthew Dillon * Update nsw parameters from swap_async_max sysctl values. 1233ad3cce20SMatthew Dillon * Do not let the sysop crash the machine with bogus numbers. 1234327f4e83SMatthew Dillon */ 12356d541bf1SJohn Baldwin mtx_lock(&pbuf_mtx); 1236327f4e83SMatthew Dillon if (swap_async_max != nsw_wcount_async_max) { 1237327f4e83SMatthew Dillon int n; 1238327f4e83SMatthew Dillon int s; 1239327f4e83SMatthew Dillon 1240327f4e83SMatthew Dillon /* 1241327f4e83SMatthew Dillon * limit range 1242327f4e83SMatthew Dillon */ 1243327f4e83SMatthew Dillon if ((n = swap_async_max) > nswbuf / 2) 1244327f4e83SMatthew Dillon n = nswbuf / 2; 1245327f4e83SMatthew Dillon if (n < 1) 1246327f4e83SMatthew Dillon n = 1; 1247327f4e83SMatthew Dillon swap_async_max = n; 1248327f4e83SMatthew Dillon 1249327f4e83SMatthew Dillon /* 1250327f4e83SMatthew Dillon * Adjust difference ( if possible ). If the current async 1251327f4e83SMatthew Dillon * count is too low, we may not be able to make the adjustment 1252327f4e83SMatthew Dillon * at this time. 
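 *
 *	Editor's worked example (illustrative numbers, not from the
 *	source): with nswbuf = 256, a sysctl write of swap_async_max =
 *	1000 is clamped to nswbuf / 2 = 128, and a write of 0 is raised
 *	to 1.  The clamped value is then compared with the old limit and
 *	the delta (n - nsw_wcount_async_max) is applied to both
 *	nsw_wcount_async and nsw_wcount_async_max only when it would not
 *	drive the available count negative, i.e. only when enough async
 *	pbufs are currently idle; otherwise the adjustment is deferred
 *	until a later call finds enough slack.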
1253327f4e83SMatthew Dillon */ 1254327f4e83SMatthew Dillon s = splvm(); 1255327f4e83SMatthew Dillon n -= nsw_wcount_async_max; 1256327f4e83SMatthew Dillon if (nsw_wcount_async + n >= 0) { 1257327f4e83SMatthew Dillon nsw_wcount_async += n; 1258327f4e83SMatthew Dillon nsw_wcount_async_max += n; 1259327f4e83SMatthew Dillon wakeup(&nsw_wcount_async); 1260327f4e83SMatthew Dillon } 1261327f4e83SMatthew Dillon splx(s); 1262327f4e83SMatthew Dillon } 12636d541bf1SJohn Baldwin mtx_unlock(&pbuf_mtx); 1264327f4e83SMatthew Dillon 1265327f4e83SMatthew Dillon /* 1266327f4e83SMatthew Dillon * Step 3 1267327f4e83SMatthew Dillon * 12681c7c3c6aSMatthew Dillon * Assign swap blocks and issue I/O. We reallocate swap on the fly. 12691c7c3c6aSMatthew Dillon * The page is left dirty until the pageout operation completes 12701c7c3c6aSMatthew Dillon * successfully. 12711c7c3c6aSMatthew Dillon */ 12721c7c3c6aSMatthew Dillon for (i = 0; i < count; i += n) { 12731c7c3c6aSMatthew Dillon int s; 12741c7c3c6aSMatthew Dillon int j; 12751c7c3c6aSMatthew Dillon struct buf *bp; 1276a316d390SJohn Dyson daddr_t blk; 127726f9a767SRodney W. Grimes 1278df8bae1dSRodney W. Grimes /* 12791c7c3c6aSMatthew Dillon * Maximum I/O size is limited by a number of factors. 1280df8bae1dSRodney W. Grimes */ 12811c7c3c6aSMatthew Dillon n = min(BLIST_MAX_ALLOC, count - i); 1282327f4e83SMatthew Dillon n = min(n, nsw_cluster_max); 12831c7c3c6aSMatthew Dillon 12844dcc5c2dSMatthew Dillon s = splvm(); 12854dcc5c2dSMatthew Dillon 128626f9a767SRodney W. Grimes /* 12871c7c3c6aSMatthew Dillon * Get biggest block of swap we can. If we fail, fall 12881c7c3c6aSMatthew Dillon * back and try to allocate a smaller block. Don't go 12891c7c3c6aSMatthew Dillon * overboard trying to allocate space if it would overly 12901c7c3c6aSMatthew Dillon * fragment swap. 129126f9a767SRodney W. Grimes */ 12921c7c3c6aSMatthew Dillon while ( 12931c7c3c6aSMatthew Dillon (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE && 12941c7c3c6aSMatthew Dillon n > 4 12951c7c3c6aSMatthew Dillon ) { 12961c7c3c6aSMatthew Dillon n >>= 1; 129726f9a767SRodney W. Grimes } 12981c7c3c6aSMatthew Dillon if (blk == SWAPBLK_NONE) { 12994dcc5c2dSMatthew Dillon for (j = 0; j < n; ++j) 13001c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_FAIL; 13014dcc5c2dSMatthew Dillon splx(s); 13021c7c3c6aSMatthew Dillon continue; 130326f9a767SRodney W. Grimes } 130426f9a767SRodney W. Grimes 130526f9a767SRodney W. Grimes /* 13064dcc5c2dSMatthew Dillon * The I/O we are constructing cannot cross a physical 13074dcc5c2dSMatthew Dillon * disk boundry in the swap stripe. Note: we are still 13084dcc5c2dSMatthew Dillon * at splvm(). 130926f9a767SRodney W. Grimes */ 13101c7c3c6aSMatthew Dillon if ((blk ^ (blk + n)) & dmmax_mask) { 13111c7c3c6aSMatthew Dillon j = ((blk + dmmax) & dmmax_mask) - blk; 13121c7c3c6aSMatthew Dillon swp_pager_freeswapspace(blk + j, n - j); 13131c7c3c6aSMatthew Dillon n = j; 1314e47ed70bSJohn Dyson } 131526f9a767SRodney W. Grimes 131626f9a767SRodney W. Grimes /* 13171c7c3c6aSMatthew Dillon * All I/O parameters have been satisfied, build the I/O 13181c7c3c6aSMatthew Dillon * request and assign the swap space. 13191c7c3c6aSMatthew Dillon * 13201c7c3c6aSMatthew Dillon * NOTE: B_PAGING is set by pbgetvp() 132126f9a767SRodney W. 
Grimes */ 1322327f4e83SMatthew Dillon if (sync == TRUE) { 1323327f4e83SMatthew Dillon bp = getpbuf(&nsw_wcount_sync); 1324327f4e83SMatthew Dillon } else { 1325327f4e83SMatthew Dillon bp = getpbuf(&nsw_wcount_async); 132621144e3bSPoul-Henning Kamp bp->b_flags = B_ASYNC; 1327327f4e83SMatthew Dillon } 1328912e4ae9SPoul-Henning Kamp bp->b_iocmd = BIO_WRITE; 13291c7c3c6aSMatthew Dillon bp->b_spc = NULL; /* not used, but NULL-out anyway */ 133026f9a767SRodney W. Grimes 13311c7c3c6aSMatthew Dillon pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 13321c7c3c6aSMatthew Dillon 1333fdcc1cc0SJohn Baldwin bp->b_rcred = crhold(thread0.td_ucred); 1334fdcc1cc0SJohn Baldwin bp->b_wcred = crhold(thread0.td_ucred); 13351c7c3c6aSMatthew Dillon bp->b_bcount = PAGE_SIZE * n; 13361c7c3c6aSMatthew Dillon bp->b_bufsize = PAGE_SIZE * n; 13371c7c3c6aSMatthew Dillon bp->b_blkno = blk; 1338e47ed70bSJohn Dyson 1339a5296b05SJulian Elischer pbgetvp(swapdev_vp, bp); 1340a5296b05SJulian Elischer 13411c7c3c6aSMatthew Dillon for (j = 0; j < n; ++j) { 13421c7c3c6aSMatthew Dillon vm_page_t mreq = m[i+j]; 13431c7c3c6aSMatthew Dillon 13441c7c3c6aSMatthew Dillon swp_pager_meta_build( 13451c7c3c6aSMatthew Dillon mreq->object, 13461c7c3c6aSMatthew Dillon mreq->pindex, 13474dcc5c2dSMatthew Dillon blk + j 13481c7c3c6aSMatthew Dillon ); 13497dbf82dcSMatthew Dillon vm_page_dirty(mreq); 13501c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_OK; 13511c7c3c6aSMatthew Dillon 13521c7c3c6aSMatthew Dillon vm_page_flag_set(mreq, PG_SWAPINPROG); 13531c7c3c6aSMatthew Dillon bp->b_pages[j] = mreq; 13541c7c3c6aSMatthew Dillon } 13551c7c3c6aSMatthew Dillon bp->b_npages = n; 1356a5296b05SJulian Elischer /* 1357a5296b05SJulian Elischer * Must set dirty range for NFS to work. 1358a5296b05SJulian Elischer */ 1359a5296b05SJulian Elischer bp->b_dirtyoff = 0; 1360a5296b05SJulian Elischer bp->b_dirtyend = bp->b_bcount; 13611c7c3c6aSMatthew Dillon 13621c7c3c6aSMatthew Dillon cnt.v_swapout++; 13631c7c3c6aSMatthew Dillon cnt.v_swappgsout += bp->b_npages; 136426f9a767SRodney W. Grimes swapdev_vp->v_numoutput++; 136526f9a767SRodney W. Grimes 13664dcc5c2dSMatthew Dillon splx(s); 13674dcc5c2dSMatthew Dillon 136826f9a767SRodney W. Grimes /* 13691c7c3c6aSMatthew Dillon * asynchronous 13701c7c3c6aSMatthew Dillon * 1371ea3aecf5SPeter Wemm * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 137226f9a767SRodney W. Grimes */ 13731c7c3c6aSMatthew Dillon if (sync == FALSE) { 13741c7c3c6aSMatthew Dillon bp->b_iodone = swp_pager_async_iodone; 137567812eacSKirk McKusick BUF_KERNPROC(bp); 1376b99c307aSPoul-Henning Kamp BUF_STRATEGY(bp); 13771c7c3c6aSMatthew Dillon 13781c7c3c6aSMatthew Dillon for (j = 0; j < n; ++j) 13791c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_PEND; 138023955314SAlfred Perlstein /* restart outter loop */ 13811c7c3c6aSMatthew Dillon continue; 138226f9a767SRodney W. Grimes } 1383e47ed70bSJohn Dyson 138426f9a767SRodney W. Grimes /* 13851c7c3c6aSMatthew Dillon * synchronous 13861c7c3c6aSMatthew Dillon * 1387ea3aecf5SPeter Wemm * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 13881c7c3c6aSMatthew Dillon */ 13891c7c3c6aSMatthew Dillon bp->b_iodone = swp_pager_sync_iodone; 1390b99c307aSPoul-Henning Kamp BUF_STRATEGY(bp); 13911c7c3c6aSMatthew Dillon 13921c7c3c6aSMatthew Dillon /* 13931c7c3c6aSMatthew Dillon * Wait for the sync I/O to complete, then update rtvals. 
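 *
 *	(Editor's aside on the stripe clip performed above, with assumed
 *	numbers; it presumes dmmax is a power of two and dmmax_mask is
 *	~(dmmax - 1), as the uses in this file imply.)  The expression
 *	(blk ^ (blk + n)) & dmmax_mask is non-zero whenever blk and
 *	blk + n land in different dmmax-aligned stripes.  For an assumed
 *	dmmax of 16, blk = 30 and n = 4: 30 ^ 34 = 60 and 60 & ~15 =
 *	48 != 0, so the range would cross the boundary at 32.  The clip
 *	computes j = ((30 + 16) & ~15) - 30 = 2, frees blocks 32 and 33
 *	back to the bitmap, and issues the write for only the two pages
 *	that fit below the boundary.
 *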
13941c7c3c6aSMatthew Dillon * We just set the rtvals[] to VM_PAGER_PEND so we can call 13951c7c3c6aSMatthew Dillon * our async completion routine at the end, thus avoiding a 13961c7c3c6aSMatthew Dillon * double-free. 139726f9a767SRodney W. Grimes */ 13984dcc5c2dSMatthew Dillon s = splbio(); 139926f9a767SRodney W. Grimes while ((bp->b_flags & B_DONE) == 0) { 140024a1cce3SDavid Greenman tsleep(bp, PVM, "swwrt", 0); 140126f9a767SRodney W. Grimes } 14021c7c3c6aSMatthew Dillon for (j = 0; j < n; ++j) 14031c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_PEND; 14041c7c3c6aSMatthew Dillon /* 14051c7c3c6aSMatthew Dillon * Now that we are through with the bp, we can call the 14061c7c3c6aSMatthew Dillon * normal async completion, which frees everything up. 14071c7c3c6aSMatthew Dillon */ 14081c7c3c6aSMatthew Dillon swp_pager_async_iodone(bp); 140926f9a767SRodney W. Grimes splx(s); 14101c7c3c6aSMatthew Dillon } 14111c7c3c6aSMatthew Dillon } 14121c7c3c6aSMatthew Dillon 14131c7c3c6aSMatthew Dillon /* 14141c7c3c6aSMatthew Dillon * swap_pager_sync_iodone: 14151c7c3c6aSMatthew Dillon * 14161c7c3c6aSMatthew Dillon * Completion routine for synchronous reads and writes from/to swap. 14171c7c3c6aSMatthew Dillon * We just mark the bp is complete and wake up anyone waiting on it. 14181c7c3c6aSMatthew Dillon * 14194dcc5c2dSMatthew Dillon * This routine may not block. This routine is called at splbio() or better. 14201c7c3c6aSMatthew Dillon */ 14211c7c3c6aSMatthew Dillon static void 14221c7c3c6aSMatthew Dillon swp_pager_sync_iodone(bp) 14231c7c3c6aSMatthew Dillon struct buf *bp; 14241c7c3c6aSMatthew Dillon { 14251c7c3c6aSMatthew Dillon bp->b_flags |= B_DONE; 14261c7c3c6aSMatthew Dillon bp->b_flags &= ~B_ASYNC; 14271c7c3c6aSMatthew Dillon wakeup(bp); 14281c7c3c6aSMatthew Dillon } 14291c7c3c6aSMatthew Dillon 14301c7c3c6aSMatthew Dillon /* 14311c7c3c6aSMatthew Dillon * swp_pager_async_iodone: 14321c7c3c6aSMatthew Dillon * 14331c7c3c6aSMatthew Dillon * Completion routine for asynchronous reads and writes from/to swap. 14341c7c3c6aSMatthew Dillon * Also called manually by synchronous code to finish up a bp. 14351c7c3c6aSMatthew Dillon * 14361c7c3c6aSMatthew Dillon * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 14371c7c3c6aSMatthew Dillon * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 14381c7c3c6aSMatthew Dillon * unbusy all pages except the 'main' request page. For WRITE 14391c7c3c6aSMatthew Dillon * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 14401c7c3c6aSMatthew Dillon * because we marked them all VM_PAGER_PEND on return from putpages ). 14411c7c3c6aSMatthew Dillon * 14421c7c3c6aSMatthew Dillon * This routine may not block. 14434dcc5c2dSMatthew Dillon * This routine is called at splbio() or better 14444dcc5c2dSMatthew Dillon * 14454dcc5c2dSMatthew Dillon * We up ourselves to splvm() as required for various vm_page related 14464dcc5c2dSMatthew Dillon * calls. 
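 *
 *	Editor's summary of the per-page outcomes handled below, derived
 *	from the code that follows and added only for readability:
 *
 *	read  + error:    m->valid cleared; pages other than reqpage are
 *			  freed, reqpage is only flashed awake.
 *	read  + success:  pages marked fully valid and undirtied; pages
 *			  other than reqpage are deactivated and woken,
 *			  reqpage is flashed and stays busy for the caller.
 *	write + error:    page is redirtied and reactivated, then
 *			  vm_page_io_finish().
 *	write + success:  dirty cleared, vm_page_io_finish(); if memory is
 *			  severely low we also try to move the page to the
 *			  cache queue, otherwise (or if that fails) the page
 *			  is left write-protected.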
14471c7c3c6aSMatthew Dillon */ 14481c7c3c6aSMatthew Dillon static void 14491c7c3c6aSMatthew Dillon swp_pager_async_iodone(bp) 145054d92145SMatthew Dillon struct buf *bp; 14511c7c3c6aSMatthew Dillon { 14521c7c3c6aSMatthew Dillon int s; 14531c7c3c6aSMatthew Dillon int i; 14541c7c3c6aSMatthew Dillon vm_object_t object = NULL; 14551c7c3c6aSMatthew Dillon 14560cddd8f0SMatthew Dillon GIANT_REQUIRED; 14571c7c3c6aSMatthew Dillon bp->b_flags |= B_DONE; 14581c7c3c6aSMatthew Dillon 14591c7c3c6aSMatthew Dillon /* 14601c7c3c6aSMatthew Dillon * report error 14611c7c3c6aSMatthew Dillon */ 1462c244d2deSPoul-Henning Kamp if (bp->b_ioflags & BIO_ERROR) { 14631c7c3c6aSMatthew Dillon printf( 14641c7c3c6aSMatthew Dillon "swap_pager: I/O error - %s failed; blkno %ld," 14651c7c3c6aSMatthew Dillon "size %ld, error %d\n", 146621144e3bSPoul-Henning Kamp ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"), 14671c7c3c6aSMatthew Dillon (long)bp->b_blkno, 14681c7c3c6aSMatthew Dillon (long)bp->b_bcount, 14691c7c3c6aSMatthew Dillon bp->b_error 14701c7c3c6aSMatthew Dillon ); 14711c7c3c6aSMatthew Dillon } 14721c7c3c6aSMatthew Dillon 14731c7c3c6aSMatthew Dillon /* 14744dcc5c2dSMatthew Dillon * set object, raise to splvm(). 14751c7c3c6aSMatthew Dillon */ 14761c7c3c6aSMatthew Dillon if (bp->b_npages) 14771c7c3c6aSMatthew Dillon object = bp->b_pages[0]->object; 14784dcc5c2dSMatthew Dillon s = splvm(); 147926f9a767SRodney W. Grimes 148026f9a767SRodney W. Grimes /* 148126f9a767SRodney W. Grimes * remove the mapping for kernel virtual 148226f9a767SRodney W. Grimes */ 14831c7c3c6aSMatthew Dillon pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 148426f9a767SRodney W. Grimes 148526f9a767SRodney W. Grimes /* 14861c7c3c6aSMatthew Dillon * cleanup pages. If an error occurs writing to swap, we are in 14871c7c3c6aSMatthew Dillon * very serious trouble. If it happens to be a disk error, though, 14881c7c3c6aSMatthew Dillon * we may be able to recover by reassigning the swap later on. So 14891c7c3c6aSMatthew Dillon * in this case we remove the m->swapblk assignment for the page 14901c7c3c6aSMatthew Dillon * but do not free it in the rlist. The errornous block(s) are thus 14911c7c3c6aSMatthew Dillon * never reallocated as swap. Redirty the page and continue. 149226f9a767SRodney W. Grimes */ 14931c7c3c6aSMatthew Dillon for (i = 0; i < bp->b_npages; ++i) { 14941c7c3c6aSMatthew Dillon vm_page_t m = bp->b_pages[i]; 1495e47ed70bSJohn Dyson 14961c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_SWAPINPROG); 1497e47ed70bSJohn Dyson 1498c244d2deSPoul-Henning Kamp if (bp->b_ioflags & BIO_ERROR) { 1499ffc82b0aSJohn Dyson /* 15001c7c3c6aSMatthew Dillon * If an error occurs I'd love to throw the swapblk 15011c7c3c6aSMatthew Dillon * away without freeing it back to swapspace, so it 15021c7c3c6aSMatthew Dillon * can never be used again. But I can't from an 15031c7c3c6aSMatthew Dillon * interrupt. 1504ffc82b0aSJohn Dyson */ 150521144e3bSPoul-Henning Kamp if (bp->b_iocmd == BIO_READ) { 15061c7c3c6aSMatthew Dillon /* 15071c7c3c6aSMatthew Dillon * When reading, reqpage needs to stay 15081c7c3c6aSMatthew Dillon * locked for the parent, but all other 15091c7c3c6aSMatthew Dillon * pages can be freed. We still want to 15101c7c3c6aSMatthew Dillon * wakeup the parent waiting on the page, 15111c7c3c6aSMatthew Dillon * though. ( also: pg_reqpage can be -1 and 15121c7c3c6aSMatthew Dillon * not match anything ). 
15131c7c3c6aSMatthew Dillon * 15141c7c3c6aSMatthew Dillon * We have to wake specifically requested pages 15151c7c3c6aSMatthew Dillon * up too because we cleared PG_SWAPINPROG and 15161c7c3c6aSMatthew Dillon * someone may be waiting for that. 15171c7c3c6aSMatthew Dillon * 15181c7c3c6aSMatthew Dillon * NOTE: for reads, m->dirty will probably 1519956f3135SPhilippe Charnier * be overridden by the original caller of 15201c7c3c6aSMatthew Dillon * getpages so don't play cute tricks here. 15211c7c3c6aSMatthew Dillon * 1522279d7226SMatthew Dillon * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE 1523279d7226SMatthew Dillon * AS THIS MESSES WITH object->memq, and it is 1524279d7226SMatthew Dillon * not legal to mess with object->memq from an 1525279d7226SMatthew Dillon * interrupt. 15261c7c3c6aSMatthew Dillon */ 15271c7c3c6aSMatthew Dillon m->valid = 0; 15281c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_ZERO); 15291c7c3c6aSMatthew Dillon if (i != bp->b_pager.pg_reqpage) 15301c7c3c6aSMatthew Dillon vm_page_free(m); 15311c7c3c6aSMatthew Dillon else 15321c7c3c6aSMatthew Dillon vm_page_flash(m); 15331c7c3c6aSMatthew Dillon /* 15341c7c3c6aSMatthew Dillon * If i == bp->b_pager.pg_reqpage, do not wake 15351c7c3c6aSMatthew Dillon * the page up. The caller needs to. 15361c7c3c6aSMatthew Dillon */ 15371c7c3c6aSMatthew Dillon } else { 15381c7c3c6aSMatthew Dillon /* 15391c7c3c6aSMatthew Dillon * If a write error occurs, reactivate page 15401c7c3c6aSMatthew Dillon * so it doesn't clog the inactive list, 15411c7c3c6aSMatthew Dillon * then finish the I/O. 15421c7c3c6aSMatthew Dillon */ 15437dbf82dcSMatthew Dillon vm_page_dirty(m); 15441c7c3c6aSMatthew Dillon vm_page_activate(m); 15451c7c3c6aSMatthew Dillon vm_page_io_finish(m); 15461c7c3c6aSMatthew Dillon } 154721144e3bSPoul-Henning Kamp } else if (bp->b_iocmd == BIO_READ) { 15481c7c3c6aSMatthew Dillon /* 15491c7c3c6aSMatthew Dillon * For read success, clear dirty bits. Nobody should 15501c7c3c6aSMatthew Dillon * have this page mapped but don't take any chances, 15511c7c3c6aSMatthew Dillon * make sure the pmap modify bits are also cleared. 15521c7c3c6aSMatthew Dillon * 15531c7c3c6aSMatthew Dillon * NOTE: for reads, m->dirty will probably be 1554956f3135SPhilippe Charnier * overridden by the original caller of getpages so 15551c7c3c6aSMatthew Dillon * we cannot set them in order to free the underlying 15561c7c3c6aSMatthew Dillon * swap in a low-swap situation. I don't think we'd 15571c7c3c6aSMatthew Dillon * want to do that anyway, but it was an optimization 15581c7c3c6aSMatthew Dillon * that existed in the old swapper for a time before 15591c7c3c6aSMatthew Dillon * it got ripped out due to precisely this problem. 15601c7c3c6aSMatthew Dillon * 15611c7c3c6aSMatthew Dillon * clear PG_ZERO in page. 15621c7c3c6aSMatthew Dillon * 15631c7c3c6aSMatthew Dillon * If not the requested page then deactivate it. 15641c7c3c6aSMatthew Dillon * 15651c7c3c6aSMatthew Dillon * Note that the requested page, reqpage, is left 15661c7c3c6aSMatthew Dillon * busied, but we still have to wake it up. The 15671c7c3c6aSMatthew Dillon * other pages are released (unbusied) by 15681c7c3c6aSMatthew Dillon * vm_page_wakeup(). We do not set reqpage's 15691c7c3c6aSMatthew Dillon * valid bits here, it is up to the caller. 
15701c7c3c6aSMatthew Dillon */ 15710385347cSPeter Wemm pmap_clear_modify(m); 15721c7c3c6aSMatthew Dillon m->valid = VM_PAGE_BITS_ALL; 15732c28a105SAlan Cox vm_page_undirty(m); 15741c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_ZERO); 15751c7c3c6aSMatthew Dillon 15761c7c3c6aSMatthew Dillon /* 15771c7c3c6aSMatthew Dillon * We have to wake specifically requested pages 15781c7c3c6aSMatthew Dillon * up too because we cleared PG_SWAPINPROG and 15791c7c3c6aSMatthew Dillon * could be waiting for it in getpages. However, 15801c7c3c6aSMatthew Dillon * be sure to not unbusy getpages specifically 15811c7c3c6aSMatthew Dillon * requested page - getpages expects it to be 15821c7c3c6aSMatthew Dillon * left busy. 15831c7c3c6aSMatthew Dillon */ 15841c7c3c6aSMatthew Dillon if (i != bp->b_pager.pg_reqpage) { 15851c7c3c6aSMatthew Dillon vm_page_deactivate(m); 15861c7c3c6aSMatthew Dillon vm_page_wakeup(m); 15871c7c3c6aSMatthew Dillon } else { 15881c7c3c6aSMatthew Dillon vm_page_flash(m); 15891c7c3c6aSMatthew Dillon } 15901c7c3c6aSMatthew Dillon } else { 15911c7c3c6aSMatthew Dillon /* 15921c7c3c6aSMatthew Dillon * For write success, clear the modify and dirty 15931c7c3c6aSMatthew Dillon * status, then finish the I/O ( which decrements the 15941c7c3c6aSMatthew Dillon * busy count and possibly wakes waiter's up ). 15951c7c3c6aSMatthew Dillon */ 15960385347cSPeter Wemm pmap_clear_modify(m); 1597c52e7044SAlan Cox vm_page_undirty(m); 15981c7c3c6aSMatthew Dillon vm_page_io_finish(m); 1599936524aaSMatthew Dillon if (!vm_page_count_severe() || !vm_page_try_to_cache(m)) 1600936524aaSMatthew Dillon vm_page_protect(m, VM_PROT_READ); 1601ffc82b0aSJohn Dyson } 1602df8bae1dSRodney W. Grimes } 160326f9a767SRodney W. Grimes 16041c7c3c6aSMatthew Dillon /* 16051c7c3c6aSMatthew Dillon * adjust pip. NOTE: the original parent may still have its own 16061c7c3c6aSMatthew Dillon * pip refs on the object. 16071c7c3c6aSMatthew Dillon */ 16081c7c3c6aSMatthew Dillon if (object) 16091c7c3c6aSMatthew Dillon vm_object_pip_wakeupn(object, bp->b_npages); 161026f9a767SRodney W. Grimes 16111c7c3c6aSMatthew Dillon /* 16121c7c3c6aSMatthew Dillon * release the physical I/O buffer 16131c7c3c6aSMatthew Dillon */ 1614327f4e83SMatthew Dillon relpbuf( 1615327f4e83SMatthew Dillon bp, 161621144e3bSPoul-Henning Kamp ((bp->b_iocmd == BIO_READ) ? &nsw_rcount : 1617327f4e83SMatthew Dillon ((bp->b_flags & B_ASYNC) ? 1618327f4e83SMatthew Dillon &nsw_wcount_async : 1619327f4e83SMatthew Dillon &nsw_wcount_sync 1620327f4e83SMatthew Dillon ) 1621327f4e83SMatthew Dillon ) 1622327f4e83SMatthew Dillon ); 162326f9a767SRodney W. Grimes splx(s); 162426f9a767SRodney W. Grimes } 16251c7c3c6aSMatthew Dillon 16261c7c3c6aSMatthew Dillon /************************************************************************ 16271c7c3c6aSMatthew Dillon * SWAP META DATA * 16281c7c3c6aSMatthew Dillon ************************************************************************ 16291c7c3c6aSMatthew Dillon * 16301c7c3c6aSMatthew Dillon * These routines manipulate the swap metadata stored in the 16314dcc5c2dSMatthew Dillon * OBJT_SWAP object. All swp_*() routines must be called at 16324dcc5c2dSMatthew Dillon * splvm() because swap can be freed up by the low level vm_page 16334dcc5c2dSMatthew Dillon * code which might be called from interrupts beyond what splbio() covers. 16341c7c3c6aSMatthew Dillon * 16354dcc5c2dSMatthew Dillon * Swap metadata is implemented with a global hash and not directly 16364dcc5c2dSMatthew Dillon * linked into the object. 
Instead the object simply contains 16374dcc5c2dSMatthew Dillon * appropriate tracking counters. 16381c7c3c6aSMatthew Dillon */ 16391c7c3c6aSMatthew Dillon 16401c7c3c6aSMatthew Dillon /* 16411c7c3c6aSMatthew Dillon * SWP_PAGER_HASH() - hash swap meta data 16421c7c3c6aSMatthew Dillon * 16434dcc5c2dSMatthew Dillon * This is an inline helper function which hashes the swapblk given 16441c7c3c6aSMatthew Dillon * the object and page index. It returns a pointer to a pointer 16451c7c3c6aSMatthew Dillon * to the object, or a pointer to a NULL pointer if it could not 16461c7c3c6aSMatthew Dillon * find a swapblk. 16474dcc5c2dSMatthew Dillon * 16484dcc5c2dSMatthew Dillon * This routine must be called at splvm(). 16491c7c3c6aSMatthew Dillon */ 16501c7c3c6aSMatthew Dillon static __inline struct swblock ** 16514dcc5c2dSMatthew Dillon swp_pager_hash(vm_object_t object, vm_pindex_t index) 16521c7c3c6aSMatthew Dillon { 16531c7c3c6aSMatthew Dillon struct swblock **pswap; 16541c7c3c6aSMatthew Dillon struct swblock *swap; 16551c7c3c6aSMatthew Dillon 16561c7c3c6aSMatthew Dillon index &= ~SWAP_META_MASK; 1657af647ddeSBruce Evans pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; 16581c7c3c6aSMatthew Dillon while ((swap = *pswap) != NULL) { 16591c7c3c6aSMatthew Dillon if (swap->swb_object == object && 16601c7c3c6aSMatthew Dillon swap->swb_index == index 16611c7c3c6aSMatthew Dillon ) { 16621c7c3c6aSMatthew Dillon break; 16631c7c3c6aSMatthew Dillon } 16641c7c3c6aSMatthew Dillon pswap = &swap->swb_hnext; 16651c7c3c6aSMatthew Dillon } 16661c7c3c6aSMatthew Dillon return (pswap); 16671c7c3c6aSMatthew Dillon } 16681c7c3c6aSMatthew Dillon 16691c7c3c6aSMatthew Dillon /* 16701c7c3c6aSMatthew Dillon * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 16711c7c3c6aSMatthew Dillon * 16721c7c3c6aSMatthew Dillon * We first convert the object to a swap object if it is a default 16731c7c3c6aSMatthew Dillon * object. 16741c7c3c6aSMatthew Dillon * 16751c7c3c6aSMatthew Dillon * The specified swapblk is added to the object's swap metadata. If 16761c7c3c6aSMatthew Dillon * the swapblk is not valid, it is freed instead. Any previously 16771c7c3c6aSMatthew Dillon * assigned swapblk is freed. 16784dcc5c2dSMatthew Dillon * 16794dcc5c2dSMatthew Dillon * This routine must be called at splvm(), except when used to convert 16804dcc5c2dSMatthew Dillon * an OBJT_DEFAULT object into an OBJT_SWAP object. 
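 *
 *	Editor's note on the hash lookup the metadata routines above and
 *	below share (example values are assumed, not from the source):
 *	swp_pager_hash() first rounds the page index down to its
 *	SWAP_META_PAGES-aligned group (index &= ~SWAP_META_MASK), picks a
 *	bucket with ((index ^ (int)(intptr_t)object) & swhash_mask), and
 *	then walks the swb_hnext chain for an exact (object, group)
 *	match.  If SWAP_META_PAGES were 16, pindex 37 and pindex 42 would
 *	both map to group 32 and therefore to the same struct swblock,
 *	with their blocks stored in swb_pages[5] and swb_pages[10]
 *	respectively, or SWAPBLK_NONE when unassigned.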
16811c7c3c6aSMatthew Dillon */ 16821c7c3c6aSMatthew Dillon static void 16831c7c3c6aSMatthew Dillon swp_pager_meta_build( 16841c7c3c6aSMatthew Dillon vm_object_t object, 16854dcc5c2dSMatthew Dillon vm_pindex_t index, 16864dcc5c2dSMatthew Dillon daddr_t swapblk 16871c7c3c6aSMatthew Dillon ) { 16881c7c3c6aSMatthew Dillon struct swblock *swap; 16891c7c3c6aSMatthew Dillon struct swblock **pswap; 16901c7c3c6aSMatthew Dillon 16910cddd8f0SMatthew Dillon GIANT_REQUIRED; 16921c7c3c6aSMatthew Dillon /* 16931c7c3c6aSMatthew Dillon * Convert default object to swap object if necessary 16941c7c3c6aSMatthew Dillon */ 16951c7c3c6aSMatthew Dillon if (object->type != OBJT_SWAP) { 16961c7c3c6aSMatthew Dillon object->type = OBJT_SWAP; 16971c7c3c6aSMatthew Dillon object->un_pager.swp.swp_bcount = 0; 16981c7c3c6aSMatthew Dillon 1699a9fa2c05SAlfred Perlstein mtx_lock(&sw_alloc_mtx); 17001c7c3c6aSMatthew Dillon if (object->handle != NULL) { 17011c7c3c6aSMatthew Dillon TAILQ_INSERT_TAIL( 17021c7c3c6aSMatthew Dillon NOBJLIST(object->handle), 17031c7c3c6aSMatthew Dillon object, 17041c7c3c6aSMatthew Dillon pager_object_list 17051c7c3c6aSMatthew Dillon ); 17061c7c3c6aSMatthew Dillon } else { 17071c7c3c6aSMatthew Dillon TAILQ_INSERT_TAIL( 17081c7c3c6aSMatthew Dillon &swap_pager_un_object_list, 17091c7c3c6aSMatthew Dillon object, 17101c7c3c6aSMatthew Dillon pager_object_list 17111c7c3c6aSMatthew Dillon ); 17121c7c3c6aSMatthew Dillon } 1713a9fa2c05SAlfred Perlstein mtx_unlock(&sw_alloc_mtx); 17141c7c3c6aSMatthew Dillon } 17151c7c3c6aSMatthew Dillon 17161c7c3c6aSMatthew Dillon /* 17171c7c3c6aSMatthew Dillon * Locate hash entry. If not found create, but if we aren't adding 17184dcc5c2dSMatthew Dillon * anything just return. If we run out of space in the map we wait 17194dcc5c2dSMatthew Dillon * and, since the hash table may have changed, retry. 
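 *	(Editor's elaboration.)  The rehash after VM_WAIT matters: while
 *	we slept, another thread may have created or torn down the
 *	swblock for this group, so the pswap pointer computed before
 *	sleeping can no longer be trusted and the lookup is redone from
 *	the top.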
17201c7c3c6aSMatthew Dillon */ 17214dcc5c2dSMatthew Dillon retry: 17221c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 17231c7c3c6aSMatthew Dillon 17241c7c3c6aSMatthew Dillon if ((swap = *pswap) == NULL) { 17251c7c3c6aSMatthew Dillon int i; 17261c7c3c6aSMatthew Dillon 17271c7c3c6aSMatthew Dillon if (swapblk == SWAPBLK_NONE) 17281c7c3c6aSMatthew Dillon return; 17291c7c3c6aSMatthew Dillon 17301c7c3c6aSMatthew Dillon swap = *pswap = zalloc(swap_zone); 17314dcc5c2dSMatthew Dillon if (swap == NULL) { 17324dcc5c2dSMatthew Dillon VM_WAIT; 17334dcc5c2dSMatthew Dillon goto retry; 17344dcc5c2dSMatthew Dillon } 17351c7c3c6aSMatthew Dillon swap->swb_hnext = NULL; 17361c7c3c6aSMatthew Dillon swap->swb_object = object; 17371c7c3c6aSMatthew Dillon swap->swb_index = index & ~SWAP_META_MASK; 17381c7c3c6aSMatthew Dillon swap->swb_count = 0; 17391c7c3c6aSMatthew Dillon 17401c7c3c6aSMatthew Dillon ++object->un_pager.swp.swp_bcount; 17411c7c3c6aSMatthew Dillon 17421c7c3c6aSMatthew Dillon for (i = 0; i < SWAP_META_PAGES; ++i) 17431c7c3c6aSMatthew Dillon swap->swb_pages[i] = SWAPBLK_NONE; 17441c7c3c6aSMatthew Dillon } 17451c7c3c6aSMatthew Dillon 17461c7c3c6aSMatthew Dillon /* 17471c7c3c6aSMatthew Dillon * Delete prior contents of metadata 17481c7c3c6aSMatthew Dillon */ 17491c7c3c6aSMatthew Dillon index &= SWAP_META_MASK; 17501c7c3c6aSMatthew Dillon 17511c7c3c6aSMatthew Dillon if (swap->swb_pages[index] != SWAPBLK_NONE) { 17524dcc5c2dSMatthew Dillon swp_pager_freeswapspace(swap->swb_pages[index], 1); 17531c7c3c6aSMatthew Dillon --swap->swb_count; 17541c7c3c6aSMatthew Dillon } 17551c7c3c6aSMatthew Dillon 17561c7c3c6aSMatthew Dillon /* 17571c7c3c6aSMatthew Dillon * Enter block into metadata 17581c7c3c6aSMatthew Dillon */ 17591c7c3c6aSMatthew Dillon swap->swb_pages[index] = swapblk; 17604dcc5c2dSMatthew Dillon if (swapblk != SWAPBLK_NONE) 17611c7c3c6aSMatthew Dillon ++swap->swb_count; 17621c7c3c6aSMatthew Dillon } 17631c7c3c6aSMatthew Dillon 17641c7c3c6aSMatthew Dillon /* 17651c7c3c6aSMatthew Dillon * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 17661c7c3c6aSMatthew Dillon * 17671c7c3c6aSMatthew Dillon * The requested range of blocks is freed, with any associated swap 17681c7c3c6aSMatthew Dillon * returned to the swap bitmap. 17691c7c3c6aSMatthew Dillon * 17701c7c3c6aSMatthew Dillon * This routine will free swap metadata structures as they are cleaned 17711c7c3c6aSMatthew Dillon * out. This routine does *NOT* operate on swap metadata associated 17721c7c3c6aSMatthew Dillon * with resident pages. 
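 *
 *	Editor's worked example (sizes assumed for illustration): if
 *	SWAP_META_PAGES were 16, freeing index = 14, count = 10 would
 *	touch the group rooted at index 0 for pages 14-15 and the group
 *	rooted at 16 for pages 16-23; when no swblock exists for a group
 *	the loop skips ahead by SWAP_META_PAGES - (index & SWAP_META_MASK)
 *	entries at once instead of probing every index.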
17731c7c3c6aSMatthew Dillon * 17741c7c3c6aSMatthew Dillon * This routine must be called at splvm() 17751c7c3c6aSMatthew Dillon */ 17761c7c3c6aSMatthew Dillon static void 17774dcc5c2dSMatthew Dillon swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count) 17781c7c3c6aSMatthew Dillon { 17790cddd8f0SMatthew Dillon GIANT_REQUIRED; 178023955314SAlfred Perlstein 17811c7c3c6aSMatthew Dillon if (object->type != OBJT_SWAP) 17821c7c3c6aSMatthew Dillon return; 17831c7c3c6aSMatthew Dillon 17841c7c3c6aSMatthew Dillon while (count > 0) { 17851c7c3c6aSMatthew Dillon struct swblock **pswap; 17861c7c3c6aSMatthew Dillon struct swblock *swap; 17871c7c3c6aSMatthew Dillon 17881c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 17891c7c3c6aSMatthew Dillon 17901c7c3c6aSMatthew Dillon if ((swap = *pswap) != NULL) { 17911c7c3c6aSMatthew Dillon daddr_t v = swap->swb_pages[index & SWAP_META_MASK]; 17921c7c3c6aSMatthew Dillon 17931c7c3c6aSMatthew Dillon if (v != SWAPBLK_NONE) { 17941c7c3c6aSMatthew Dillon swp_pager_freeswapspace(v, 1); 17951c7c3c6aSMatthew Dillon swap->swb_pages[index & SWAP_META_MASK] = 17961c7c3c6aSMatthew Dillon SWAPBLK_NONE; 17971c7c3c6aSMatthew Dillon if (--swap->swb_count == 0) { 17981c7c3c6aSMatthew Dillon *pswap = swap->swb_hnext; 17991c7c3c6aSMatthew Dillon zfree(swap_zone, swap); 18001c7c3c6aSMatthew Dillon --object->un_pager.swp.swp_bcount; 18011c7c3c6aSMatthew Dillon } 18021c7c3c6aSMatthew Dillon } 18031c7c3c6aSMatthew Dillon --count; 18041c7c3c6aSMatthew Dillon ++index; 18051c7c3c6aSMatthew Dillon } else { 18064dcc5c2dSMatthew Dillon int n = SWAP_META_PAGES - (index & SWAP_META_MASK); 18071c7c3c6aSMatthew Dillon count -= n; 18081c7c3c6aSMatthew Dillon index += n; 18091c7c3c6aSMatthew Dillon } 18101c7c3c6aSMatthew Dillon } 18111c7c3c6aSMatthew Dillon } 18121c7c3c6aSMatthew Dillon 18131c7c3c6aSMatthew Dillon /* 18141c7c3c6aSMatthew Dillon * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 18151c7c3c6aSMatthew Dillon * 18161c7c3c6aSMatthew Dillon * This routine locates and destroys all swap metadata associated with 18171c7c3c6aSMatthew Dillon * an object. 
18184dcc5c2dSMatthew Dillon * 18194dcc5c2dSMatthew Dillon * This routine must be called at splvm() 18201c7c3c6aSMatthew Dillon */ 18211c7c3c6aSMatthew Dillon static void 18221c7c3c6aSMatthew Dillon swp_pager_meta_free_all(vm_object_t object) 18231c7c3c6aSMatthew Dillon { 18241c7c3c6aSMatthew Dillon daddr_t index = 0; 18251c7c3c6aSMatthew Dillon 18260cddd8f0SMatthew Dillon GIANT_REQUIRED; 182723955314SAlfred Perlstein 18281c7c3c6aSMatthew Dillon if (object->type != OBJT_SWAP) 18291c7c3c6aSMatthew Dillon return; 18301c7c3c6aSMatthew Dillon 18311c7c3c6aSMatthew Dillon while (object->un_pager.swp.swp_bcount) { 18321c7c3c6aSMatthew Dillon struct swblock **pswap; 18331c7c3c6aSMatthew Dillon struct swblock *swap; 18341c7c3c6aSMatthew Dillon 18351c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 18361c7c3c6aSMatthew Dillon if ((swap = *pswap) != NULL) { 18371c7c3c6aSMatthew Dillon int i; 18381c7c3c6aSMatthew Dillon 18391c7c3c6aSMatthew Dillon for (i = 0; i < SWAP_META_PAGES; ++i) { 18401c7c3c6aSMatthew Dillon daddr_t v = swap->swb_pages[i]; 18411c7c3c6aSMatthew Dillon if (v != SWAPBLK_NONE) { 18421c7c3c6aSMatthew Dillon --swap->swb_count; 18434dcc5c2dSMatthew Dillon swp_pager_freeswapspace(v, 1); 18441c7c3c6aSMatthew Dillon } 18451c7c3c6aSMatthew Dillon } 18461c7c3c6aSMatthew Dillon if (swap->swb_count != 0) 18471c7c3c6aSMatthew Dillon panic("swap_pager_meta_free_all: swb_count != 0"); 18481c7c3c6aSMatthew Dillon *pswap = swap->swb_hnext; 18491c7c3c6aSMatthew Dillon zfree(swap_zone, swap); 18501c7c3c6aSMatthew Dillon --object->un_pager.swp.swp_bcount; 18511c7c3c6aSMatthew Dillon } 18521c7c3c6aSMatthew Dillon index += SWAP_META_PAGES; 18531c7c3c6aSMatthew Dillon if (index > 0x20000000) 18541c7c3c6aSMatthew Dillon panic("swp_pager_meta_free_all: failed to locate all swap meta blocks"); 18551c7c3c6aSMatthew Dillon } 18561c7c3c6aSMatthew Dillon } 18571c7c3c6aSMatthew Dillon 18581c7c3c6aSMatthew Dillon /* 18591c7c3c6aSMatthew Dillon * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 18601c7c3c6aSMatthew Dillon * 18611c7c3c6aSMatthew Dillon * This routine is capable of looking up, popping, or freeing 18621c7c3c6aSMatthew Dillon * swapblk assignments in the swap meta data or in the vm_page_t. 18631c7c3c6aSMatthew Dillon * The routine typically returns the swapblk being looked-up, or popped, 18641c7c3c6aSMatthew Dillon * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 18651c7c3c6aSMatthew Dillon * was invalid. This routine will automatically free any invalid 18661c7c3c6aSMatthew Dillon * meta-data swapblks. 18671c7c3c6aSMatthew Dillon * 18681c7c3c6aSMatthew Dillon * It is not possible to store invalid swapblks in the swap meta data 18691c7c3c6aSMatthew Dillon * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 18701c7c3c6aSMatthew Dillon * 18711c7c3c6aSMatthew Dillon * When acting on a busy resident page and paging is in progress, we 18721c7c3c6aSMatthew Dillon * have to wait until paging is complete but otherwise can act on the 18731c7c3c6aSMatthew Dillon * busy page. 18741c7c3c6aSMatthew Dillon * 18754dcc5c2dSMatthew Dillon * This routine must be called at splvm(). 18761c7c3c6aSMatthew Dillon * 18774dcc5c2dSMatthew Dillon * SWM_FREE remove and free swap block from metadata 18781c7c3c6aSMatthew Dillon * SWM_POP remove from meta data but do not free.. 
pop it out 18791c7c3c6aSMatthew Dillon */ 18801c7c3c6aSMatthew Dillon static daddr_t 18811c7c3c6aSMatthew Dillon swp_pager_meta_ctl( 18821c7c3c6aSMatthew Dillon vm_object_t object, 18831c7c3c6aSMatthew Dillon vm_pindex_t index, 18841c7c3c6aSMatthew Dillon int flags 18851c7c3c6aSMatthew Dillon ) { 18864dcc5c2dSMatthew Dillon struct swblock **pswap; 18874dcc5c2dSMatthew Dillon struct swblock *swap; 18884dcc5c2dSMatthew Dillon daddr_t r1; 18894dcc5c2dSMatthew Dillon 18900cddd8f0SMatthew Dillon GIANT_REQUIRED; 18911c7c3c6aSMatthew Dillon /* 18921c7c3c6aSMatthew Dillon * The meta data only exists of the object is OBJT_SWAP 18931c7c3c6aSMatthew Dillon * and even then might not be allocated yet. 18941c7c3c6aSMatthew Dillon */ 18954dcc5c2dSMatthew Dillon if (object->type != OBJT_SWAP) 18961c7c3c6aSMatthew Dillon return (SWAPBLK_NONE); 18971c7c3c6aSMatthew Dillon 18984dcc5c2dSMatthew Dillon r1 = SWAPBLK_NONE; 18991c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 19001c7c3c6aSMatthew Dillon 19011c7c3c6aSMatthew Dillon if ((swap = *pswap) != NULL) { 19024dcc5c2dSMatthew Dillon index &= SWAP_META_MASK; 19031c7c3c6aSMatthew Dillon r1 = swap->swb_pages[index]; 19041c7c3c6aSMatthew Dillon 19051c7c3c6aSMatthew Dillon if (r1 != SWAPBLK_NONE) { 19061c7c3c6aSMatthew Dillon if (flags & SWM_FREE) { 19074dcc5c2dSMatthew Dillon swp_pager_freeswapspace(r1, 1); 19081c7c3c6aSMatthew Dillon r1 = SWAPBLK_NONE; 19091c7c3c6aSMatthew Dillon } 19101c7c3c6aSMatthew Dillon if (flags & (SWM_FREE|SWM_POP)) { 19111c7c3c6aSMatthew Dillon swap->swb_pages[index] = SWAPBLK_NONE; 19121c7c3c6aSMatthew Dillon if (--swap->swb_count == 0) { 19131c7c3c6aSMatthew Dillon *pswap = swap->swb_hnext; 19141c7c3c6aSMatthew Dillon zfree(swap_zone, swap); 19151c7c3c6aSMatthew Dillon --object->un_pager.swp.swp_bcount; 19161c7c3c6aSMatthew Dillon } 19171c7c3c6aSMatthew Dillon } 19181c7c3c6aSMatthew Dillon } 19191c7c3c6aSMatthew Dillon } 19201c7c3c6aSMatthew Dillon return (r1); 19211c7c3c6aSMatthew Dillon } 19221c7c3c6aSMatthew Dillon 1923e4057dbdSPoul-Henning Kamp /******************************************************** 1924e4057dbdSPoul-Henning Kamp * CHAINING FUNCTIONS * 1925e4057dbdSPoul-Henning Kamp ******************************************************** 1926e4057dbdSPoul-Henning Kamp * 1927e4057dbdSPoul-Henning Kamp * These functions support recursion of I/O operations 1928e4057dbdSPoul-Henning Kamp * on bp's, typically by chaining one or more 'child' bp's 1929e4057dbdSPoul-Henning Kamp * to the parent. Synchronous, asynchronous, and semi-synchronous 1930e4057dbdSPoul-Henning Kamp * chaining is possible. 1931e4057dbdSPoul-Henning Kamp */ 1932e4057dbdSPoul-Henning Kamp 1933e4057dbdSPoul-Henning Kamp /* 1934e4057dbdSPoul-Henning Kamp * vm_pager_chain_iodone: 1935e4057dbdSPoul-Henning Kamp * 1936e4057dbdSPoul-Henning Kamp * io completion routine for child bp. Currently we fudge a bit 1937e4057dbdSPoul-Henning Kamp * on dealing with b_resid. Since users of these routines may issue 1938e4057dbdSPoul-Henning Kamp * multiple children simultaneously, sequencing of the error can be lost. 
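 *
 *	(Editor's note, derived from the code below.)  The parent struct
 *	bio carries no explicit child count; bio_driver1 is re-used as a
 *	u_int counter of outstanding children, and BIO_FLAG1 marks a
 *	parent that is sleeping in waitchainbuf() and wants a wakeup()
 *	when a child completes.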
1939e4057dbdSPoul-Henning Kamp */ 1940e4057dbdSPoul-Henning Kamp static void 1941e4057dbdSPoul-Henning Kamp vm_pager_chain_iodone(struct buf *nbp) 1942e4057dbdSPoul-Henning Kamp { 19430b441832SPoul-Henning Kamp struct bio *bp; 19440b441832SPoul-Henning Kamp u_int *count; 1945e4057dbdSPoul-Henning Kamp 19460b441832SPoul-Henning Kamp bp = nbp->b_caller1; 1947d6844b6bSTor Egge count = (u_int *)&(bp->bio_driver1); 19480b441832SPoul-Henning Kamp if (bp != NULL) { 1949e4057dbdSPoul-Henning Kamp if (nbp->b_ioflags & BIO_ERROR) { 19500b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 19510b441832SPoul-Henning Kamp bp->bio_error = nbp->b_error; 1952e4057dbdSPoul-Henning Kamp } else if (nbp->b_resid != 0) { 19530b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 19540b441832SPoul-Henning Kamp bp->bio_error = EINVAL; 1955e4057dbdSPoul-Henning Kamp } else { 19560b441832SPoul-Henning Kamp bp->bio_resid -= nbp->b_bcount; 1957e4057dbdSPoul-Henning Kamp } 19580b441832SPoul-Henning Kamp nbp->b_caller1 = NULL; 19590b441832SPoul-Henning Kamp --(*count); 19600b441832SPoul-Henning Kamp if (bp->bio_flags & BIO_FLAG1) { 19610b441832SPoul-Henning Kamp bp->bio_flags &= ~BIO_FLAG1; 1962e4057dbdSPoul-Henning Kamp wakeup(bp); 1963e4057dbdSPoul-Henning Kamp } 1964e4057dbdSPoul-Henning Kamp } 1965e4057dbdSPoul-Henning Kamp nbp->b_flags |= B_DONE; 1966e4057dbdSPoul-Henning Kamp nbp->b_flags &= ~B_ASYNC; 1967e4057dbdSPoul-Henning Kamp relpbuf(nbp, NULL); 1968e4057dbdSPoul-Henning Kamp } 1969e4057dbdSPoul-Henning Kamp 1970e4057dbdSPoul-Henning Kamp /* 1971e4057dbdSPoul-Henning Kamp * getchainbuf: 1972e4057dbdSPoul-Henning Kamp * 1973e4057dbdSPoul-Henning Kamp * Obtain a physical buffer and chain it to its parent buffer. When 1974e4057dbdSPoul-Henning Kamp * I/O completes, the parent buffer will be B_SIGNAL'd. 
Errors are 1975e4057dbdSPoul-Henning Kamp * automatically propagated to the parent 1976e4057dbdSPoul-Henning Kamp */ 1977e4057dbdSPoul-Henning Kamp struct buf * 19780b441832SPoul-Henning Kamp getchainbuf(struct bio *bp, struct vnode *vp, int flags) 1979e4057dbdSPoul-Henning Kamp { 198023955314SAlfred Perlstein struct buf *nbp; 198123955314SAlfred Perlstein u_int *count; 198223955314SAlfred Perlstein 19830cddd8f0SMatthew Dillon GIANT_REQUIRED; 198423955314SAlfred Perlstein nbp = getpbuf(NULL); 1985d6844b6bSTor Egge count = (u_int *)&(bp->bio_driver1); 1986e4057dbdSPoul-Henning Kamp 19870b441832SPoul-Henning Kamp nbp->b_caller1 = bp; 19880b441832SPoul-Henning Kamp ++(*count); 1989e4057dbdSPoul-Henning Kamp 19900b441832SPoul-Henning Kamp if (*count > 4) 1991e4057dbdSPoul-Henning Kamp waitchainbuf(bp, 4, 0); 1992e4057dbdSPoul-Henning Kamp 19930b441832SPoul-Henning Kamp nbp->b_iocmd = bp->bio_cmd; 199457c10583SPoul-Henning Kamp nbp->b_ioflags = 0; 1995e4057dbdSPoul-Henning Kamp nbp->b_flags = flags; 1996fdcc1cc0SJohn Baldwin nbp->b_rcred = crhold(thread0.td_ucred); 1997fdcc1cc0SJohn Baldwin nbp->b_wcred = crhold(thread0.td_ucred); 1998e4057dbdSPoul-Henning Kamp nbp->b_iodone = vm_pager_chain_iodone; 1999e4057dbdSPoul-Henning Kamp 2000e4057dbdSPoul-Henning Kamp if (vp) 2001e4057dbdSPoul-Henning Kamp pbgetvp(vp, nbp); 2002e4057dbdSPoul-Henning Kamp return (nbp); 2003e4057dbdSPoul-Henning Kamp } 2004e4057dbdSPoul-Henning Kamp 2005e4057dbdSPoul-Henning Kamp void 2006e4057dbdSPoul-Henning Kamp flushchainbuf(struct buf *nbp) 2007e4057dbdSPoul-Henning Kamp { 20080cddd8f0SMatthew Dillon GIANT_REQUIRED; 2009e4057dbdSPoul-Henning Kamp if (nbp->b_bcount) { 2010e4057dbdSPoul-Henning Kamp nbp->b_bufsize = nbp->b_bcount; 2011e4057dbdSPoul-Henning Kamp if (nbp->b_iocmd == BIO_WRITE) 2012e4057dbdSPoul-Henning Kamp nbp->b_dirtyend = nbp->b_bcount; 2013e4057dbdSPoul-Henning Kamp BUF_KERNPROC(nbp); 2014e4057dbdSPoul-Henning Kamp BUF_STRATEGY(nbp); 2015e4057dbdSPoul-Henning Kamp } else { 2016e4057dbdSPoul-Henning Kamp bufdone(nbp); 2017e4057dbdSPoul-Henning Kamp } 2018e4057dbdSPoul-Henning Kamp } 2019e4057dbdSPoul-Henning Kamp 202023955314SAlfred Perlstein static void 20210b441832SPoul-Henning Kamp waitchainbuf(struct bio *bp, int limit, int done) 2022e4057dbdSPoul-Henning Kamp { 2023e4057dbdSPoul-Henning Kamp int s; 202423955314SAlfred Perlstein u_int *count; 2025e4057dbdSPoul-Henning Kamp 20260cddd8f0SMatthew Dillon GIANT_REQUIRED; 2027d6844b6bSTor Egge count = (u_int *)&(bp->bio_driver1); 2028e4057dbdSPoul-Henning Kamp s = splbio(); 20290b441832SPoul-Henning Kamp while (*count > limit) { 20300b441832SPoul-Henning Kamp bp->bio_flags |= BIO_FLAG1; 2031e4057dbdSPoul-Henning Kamp tsleep(bp, PRIBIO + 4, "bpchain", 0); 2032e4057dbdSPoul-Henning Kamp } 2033e4057dbdSPoul-Henning Kamp if (done) { 20340b441832SPoul-Henning Kamp if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) { 20350b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 20360b441832SPoul-Henning Kamp bp->bio_error = EINVAL; 2037e4057dbdSPoul-Henning Kamp } 20380b441832SPoul-Henning Kamp biodone(bp); 2039e4057dbdSPoul-Henning Kamp } 2040e4057dbdSPoul-Henning Kamp splx(s); 2041e4057dbdSPoul-Henning Kamp } 2042e4057dbdSPoul-Henning Kamp 2043
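/*
 * Editor's illustrative sketch, not part of the original source: one way a
 * caller inside this file could drive the chaining helpers above to split a
 * single logical request into several child buffers.  The identifiers
 * "total" and "chunk" are hypothetical, error handling is elided, and the
 * parent bio "bp" is assumed to arrive with bio_resid preset to the full
 * transfer size, which is what the resid accounting in
 * vm_pager_chain_iodone() expects.
 */
#if 0
	struct buf *nbp;
	off_t total, chunk;

	for (total = bp->bio_resid; total > 0; total -= chunk) {
		chunk = (total > MAX_PAGEOUT_CLUSTER * PAGE_SIZE) ?
		    MAX_PAGEOUT_CLUSTER * PAGE_SIZE : total;
		nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
		nbp->b_bcount = chunk;
		nbp->b_bufsize = chunk;
		/* point nbp->b_data and nbp->b_blkno at the right piece here */
		flushchainbuf(nbp);	/* BUF_STRATEGY() it, or bufdone() if empty */
	}
	waitchainbuf(bp, 0, 1);		/* drain all children, then biodone(bp) */
#endif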