/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
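/*
 * E.g. with NOBJLISTS == 8, a handle such as 0x12345678 hashes to list
 * ((0x12345678 >> 4) & 7) == 7.  The >> 4 discards low-order bits that
 * tend to be identical for similarly-aligned handles.
 */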
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
	swap_pager_alloc __P((void *handle, vm_ooffset_t size,
			      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct bio *));
struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
static void flushchainbuf(struct buf *nbp);
static void waitchainbuf(struct bio *bp, int count, int done);

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
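/*
 * E.g. with the default SWB_NPAGES of 16, dmmax is 32 pages and dmmax_mask
 * is ~31, so two block numbers b1 and b2 fall in the same interleave stripe
 * exactly when ((b1 ^ b2) & dmmax_mask) == 0 -- the test used by the
 * getpages and strategy code below.
 */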
static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */

static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck()
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
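/*
 * E.g. with the default nswap_lowat of 128 and nswap_hiwat of 512, the
 * almost-full indication trips when free swap drops below 128 pages and
 * is not cleared until free swap climbs back above 512, so the warning
 * cannot flap while free swap hovers near a single threshold.
 */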
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init()
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init()
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.
	 */

	n = min(cnt.v_page_count, (kernel_map->max_offset - kernel_map->min_offset) / PAGE_SIZE) * 2;
	n2 = n;

	while (n > 0
	    && (swap_zone = zinit(
		    "SWAPMETA",
		    sizeof(struct swblock),
		    n,
		    ZONE_INTERRUPT,
		    1
	    )) == NULL)
		n >>= 1;
	if (swap_zone == NULL)
		printf("WARNING: failed to init swap_zone!\n");
	if (n2 != n)
		printf("Swap zone entries reduced to %d.\n", n);
	n2 = n;
	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2 ; n <<= 1)
		;
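	/*
	 * E.g. if the zone ended up with n2 == 1000 entries, the loop above
	 * leaves n == 1024, giving swhash_mask == 0x3ff.
	 */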
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);

	swhash_mask = n - 1;
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
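		/*
		 * sw_alloc_interlock protocol: 0 is free, 1 is held, -1 is
		 * held with waiters sleeping on its address; the release
		 * path below issues a wakeup only when it sees -1.
		 */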
		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	int s;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}
/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(npages)
	int npages;
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(blk, npages)
	daddr_t blk;
	int npages;
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}
/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	int s = splvm();
	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					splx(s);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	splx(s);
	return(0);
}
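/*
 * Note: the allocation loop in swap_pager_reserve() above asks for
 * BLIST_MAX_ALLOC pages at a time and halves the request on each failure,
 * so a reservation is built from the largest contiguous swap runs still
 * obtainable from the blist.
 */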
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
	vm_object_t srcobject;
	vm_object_t dstobject;
	vm_pindex_t offset;
	int destroysource;
{
	vm_pindex_t i;
	int s;

	s = splvm();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	daddr_t blk0;
	int s;

	/*
	 * do we have good backing store at the requested index ?
	 */

	s = splvm();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		splx(s);
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	splx(s);
	return (TRUE);
}
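/*
 * E.g. if pindex 10 maps to block 100, pindex 9 to block 99 and pindex 11
 * to block 101, but pindexes 8 and 12 map elsewhere, the scans above
 * report *before == 1 and *after == 1 (subject to the SWB_NPAGES/2 scan
 * limit).
 */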
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(m)
	vm_page_t m;
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bp)
{
	vm_pindex_t start;
	int count;
	int s;
	char *data;
	struct buf *nbp = NULL;

	/* XXX: KASSERT instead ? */
	if (bp->bio_bcount & PAGE_MASK) {
		bp->bio_error = EINVAL;
		bp->bio_flags |= BIO_ERROR;
		biodone(bp);
		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */

	bp->bio_error = 0;
	bp->bio_flags &= ~BIO_ERROR;
	bp->bio_resid = bp->bio_bcount;

	start = bp->bio_pblkno;
	count = howmany(bp->bio_bcount, PAGE_SIZE);
	data = bp->bio_data;

	s = splvm();

	/*
	 * Deal with BIO_DELETE
	 */

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->bio_resid = 0;
		biodone(bp);
		return;
	}
	/*
	 * Execute read or write
	 */

	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->bio_error = ENOMEM;
				bp->bio_flags |= BIO_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			splx(s);
			if (bp->bio_cmd == BIO_READ) {
				++cnt.v_swapin;
				cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++cnt.v_swapout;
				cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			s = splvm();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */

		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->bio_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */

	splx(s);

	if (nbp) {
		if (nbp->b_iocmd == BIO_READ) {
			++cnt.v_swapin;
			cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++cnt.v_swapout;
			cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
		/* nbp = NULL; */
	}

	/*
	 * Wait for completion.
	 */

	waitchainbuf(bp, 0, 1);
}
/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count, reqpage;
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.   If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */

	s = splvm();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.   Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	splx(s);

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
	bp->b_data = (caddr_t) kva;
	crhold(bp->b_rcred);
	crhold(bp->b_wcred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;
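	/*
	 * Flag each page so the asynchronous completion routine,
	 * swp_pager_async_iodone(), can tell these pages are under swap
	 * I/O; PG_SWAPINPROG is cleared there when the I/O completes.
	 */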
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
	bp->b_data = (caddr_t) kva;
	crhold(bp->b_rcred);
	crhold(bp->b_wcred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */

	BUF_KERNPROC(bp);
	BUF_STRATEGY(bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	s = splvm();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (tsleep(mreq, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
			    " %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    bp->b_bcount
			);
		}
	}

	splx(s);
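	/*
	 * Note (added for exposition): PG_SWAPINPROG is the handshake with
	 * swp_pager_async_iodone().  The hz*20 timeout above only triggers
	 * the diagnostic printf; the loop then re-tests the flag and keeps
	 * waiting, it does not abort the request.
	 */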
	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	int n = 0;
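	/*
	 * Sketch (hypothetical caller, added for exposition only): the
	 * contract documented above works out to roughly
	 *
	 *	swap_pager_putpages(object, m, count, FALSE, rtvals);
	 *	for (i = 0; i < count; ++i)
	 *		if (rtvals[i] != VM_PAGER_PEND)
	 *			... unbusy m[i] and drop its pip reference ...
	 *
	 * Entries left at VM_PAGER_PEND are finished asynchronously by
	 * swp_pager_async_iodone().
	 */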
	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		s = splvm();
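		/*
		 * Illustrative note on the fallback loop below: starting
		 * from, say, n = 16, it attempts allocations of 16, 8, and
		 * finally 4 contiguous blocks; if even the 4-block request
		 * fails, blk remains SWAPBLK_NONE and this sub-range is
		 * reported back as VM_PAGER_FAIL.
		 */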
		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			splx(s);
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_iocmd = BIO_WRITE;
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
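		/*
		 * Worked example for the stripe trim above (values assumed:
		 * dmmax = 0x2000, dmmax_mask = ~(dmmax - 1)): for
		 * blk = 0x1ffe and n = 8, (blk ^ (blk + n)) & dmmax_mask
		 * = 0x2000, so the request crosses a stripe;
		 * j = ((blk + dmmax) & dmmax_mask) - blk = 2, the trailing
		 * 6 blocks are freed back, and the write proceeds with n = 2.
		 */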
		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		crhold(bp->b_rcred);
		crhold(bp->b_wcred);

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_pages[j] = mreq;
		}
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;
		swapdev_vp->v_numoutput++;

		splx(s);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			BUF_STRATEGY(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		BUF_STRATEGY(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		s = splbio();
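		/*
		 * Note (added for exposition): B_DONE is set by
		 * swp_pager_sync_iodone() from the completion interrupt;
		 * staying at splbio() here closes the window between
		 * testing the flag and going to sleep in tsleep().
		 */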
		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, PVM, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);

		splx(s);
	}
}

/*
 * swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better.
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */

static void
swp_pager_async_iodone(bp)
	register struct buf *bp;
{
	int s;
	int i;
	vm_object_t object = NULL;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_ioflags & BIO_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}
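	/*
	 * Summary (added for exposition): the per-page cleanup loop below
	 * resolves each page by I/O direction and error state:
	 *
	 *	read,  error:	invalidated; freed unless it is the
	 *			request page
	 *	read,  ok:	marked fully valid and undirtied;
	 *			deactivated unless it is the request page
	 *	write, error:	redirtied and reactivated so the pageout
	 *			can be retried later
	 *	write, ok:	undirtied, I/O finished, possibly cached
	 */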
"pagein" : "pageout"), 15001c7c3c6aSMatthew Dillon (long)bp->b_blkno, 15011c7c3c6aSMatthew Dillon (long)bp->b_bcount, 15021c7c3c6aSMatthew Dillon bp->b_error 15031c7c3c6aSMatthew Dillon ); 15041c7c3c6aSMatthew Dillon } 15051c7c3c6aSMatthew Dillon 15061c7c3c6aSMatthew Dillon /* 15074dcc5c2dSMatthew Dillon * set object, raise to splvm(). 15081c7c3c6aSMatthew Dillon */ 15091c7c3c6aSMatthew Dillon 15101c7c3c6aSMatthew Dillon if (bp->b_npages) 15111c7c3c6aSMatthew Dillon object = bp->b_pages[0]->object; 15124dcc5c2dSMatthew Dillon s = splvm(); 151326f9a767SRodney W. Grimes 151426f9a767SRodney W. Grimes /* 151526f9a767SRodney W. Grimes * remove the mapping for kernel virtual 151626f9a767SRodney W. Grimes */ 15171c7c3c6aSMatthew Dillon 15181c7c3c6aSMatthew Dillon pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 151926f9a767SRodney W. Grimes 152026f9a767SRodney W. Grimes /* 15211c7c3c6aSMatthew Dillon * cleanup pages. If an error occurs writing to swap, we are in 15221c7c3c6aSMatthew Dillon * very serious trouble. If it happens to be a disk error, though, 15231c7c3c6aSMatthew Dillon * we may be able to recover by reassigning the swap later on. So 15241c7c3c6aSMatthew Dillon * in this case we remove the m->swapblk assignment for the page 15251c7c3c6aSMatthew Dillon * but do not free it in the rlist. The errornous block(s) are thus 15261c7c3c6aSMatthew Dillon * never reallocated as swap. Redirty the page and continue. 152726f9a767SRodney W. Grimes */ 152826f9a767SRodney W. Grimes 15291c7c3c6aSMatthew Dillon for (i = 0; i < bp->b_npages; ++i) { 15301c7c3c6aSMatthew Dillon vm_page_t m = bp->b_pages[i]; 1531e47ed70bSJohn Dyson 15321c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_SWAPINPROG); 1533e47ed70bSJohn Dyson 1534c244d2deSPoul-Henning Kamp if (bp->b_ioflags & BIO_ERROR) { 1535ffc82b0aSJohn Dyson /* 15361c7c3c6aSMatthew Dillon * If an error occurs I'd love to throw the swapblk 15371c7c3c6aSMatthew Dillon * away without freeing it back to swapspace, so it 15381c7c3c6aSMatthew Dillon * can never be used again. But I can't from an 15391c7c3c6aSMatthew Dillon * interrupt. 1540ffc82b0aSJohn Dyson */ 15411c7c3c6aSMatthew Dillon 154221144e3bSPoul-Henning Kamp if (bp->b_iocmd == BIO_READ) { 15431c7c3c6aSMatthew Dillon /* 15441c7c3c6aSMatthew Dillon * When reading, reqpage needs to stay 15451c7c3c6aSMatthew Dillon * locked for the parent, but all other 15461c7c3c6aSMatthew Dillon * pages can be freed. We still want to 15471c7c3c6aSMatthew Dillon * wakeup the parent waiting on the page, 15481c7c3c6aSMatthew Dillon * though. ( also: pg_reqpage can be -1 and 15491c7c3c6aSMatthew Dillon * not match anything ). 15501c7c3c6aSMatthew Dillon * 15511c7c3c6aSMatthew Dillon * We have to wake specifically requested pages 15521c7c3c6aSMatthew Dillon * up too because we cleared PG_SWAPINPROG and 15531c7c3c6aSMatthew Dillon * someone may be waiting for that. 15541c7c3c6aSMatthew Dillon * 15551c7c3c6aSMatthew Dillon * NOTE: for reads, m->dirty will probably 1556956f3135SPhilippe Charnier * be overridden by the original caller of 15571c7c3c6aSMatthew Dillon * getpages so don't play cute tricks here. 15581c7c3c6aSMatthew Dillon * 1559279d7226SMatthew Dillon * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE 1560279d7226SMatthew Dillon * AS THIS MESSES WITH object->memq, and it is 1561279d7226SMatthew Dillon * not legal to mess with object->memq from an 1562279d7226SMatthew Dillon * interrupt. 
				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * someone could be waiting for it in getpages.
			 * However, be sure not to unbusy the page getpages
			 * specifically requested - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio()
 *	covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */
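/*
 * Orientation note (added for exposition; the real declaration lives in
 * a header elsewhere): each hash entry is a struct swblock recording
 * SWAP_META_PAGES swap block assignments for one aligned run of an
 * object's pages, roughly:
 *
 *	struct swblock {
 *		struct swblock	*swb_hnext;	hash chain link
 *		vm_object_t	swb_object;	owning object
 *		vm_pindex_t	swb_index;	base page index (aligned)
 *		int		swb_count;	valid (non-NONE) entries
 *		daddr_t		swb_pages[SWAP_META_PAGES];
 *	};
 */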
/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
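/*
 * Illustrative example for the helper above (SWAP_META_PAGES value
 * assumed for exposition): if SWAP_META_PAGES were 16, then
 * SWAP_META_MASK would be 15, page indices 32..47 of an object would
 * all mask down to base index 32 and share one swblock, and the bucket
 * would be chosen by (32 ^ (int)(intptr_t)object) & swhash_mask.
 */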
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate the hash entry.  If not found, create one, but if we
	 * aren't adding anything just return.  If we run out of space in
	 * the map we wait and, since the hash table may have changed,
	 * retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			VM_WAIT;
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}
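/*
 * Note (added for exposition): swb_count counts only non-SWAPBLK_NONE
 * entries, and the free routines below unlink and zfree() a swblock the
 * moment its count reaches zero, so an all-empty swblock never lingers
 * in the hash.
 */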
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm().
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);

			count -= n;
			index += n;
		}
	}
}
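/*
 * Illustrative walk of the routine above (SWAP_META_PAGES value assumed
 * to be 16 for exposition): freeing 40 pages starting at index 5 handles
 * indices 5..15 one entry at a time in the first swblock; then, if no
 * swblock hashes to base 16, it skips n = 16 - (16 & 15) = 16 entries in
 * one step and resumes at index 32 for the remaining 13 pages.
 */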
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm().
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];

				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}
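/*
 * Note (added for exposition): the flag combinations accepted by
 * swp_pager_meta_ctl() below map onto three caller needs:
 *
 *	0		pure lookup (as in swap_pager_getpages() above)
 *	SWM_FREE	look up, release the block to the bitmap, and
 *			forget the assignment
 *	SWM_POP		look up and forget the assignment, but keep the
 *			block allocated (ownership passes to the caller)
 */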
/*
 * SWP_PAGER_METACTL() -	misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}

/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */
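/*
 * Sketch (hypothetical usage, added for exposition): a strategy routine
 * splits one parent bio into chained children roughly as
 *
 *	while (work remains) {
 *		nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
 *		... point nbp->b_data at the data, set b_blkno/b_bcount ...
 *		flushchainbuf(nbp);
 *	}
 *	waitchainbuf(bp, 0, 1);
 *
 * The outstanding-child count is overlaid on bp->bio_caller1, so that
 * field must not be used for anything else while a chain is in flight.
 */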
/*
 * vm_pager_chain_iodone:
 *
 *	I/O completion routine for child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be lost.
 */

static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct bio *bp;
	u_int *count;

	bp = nbp->b_caller1;
	if (bp != NULL) {
		count = (u_int *)&(bp->bio_caller1);
		if (nbp->b_ioflags & BIO_ERROR) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = nbp->b_error;
		} else if (nbp->b_resid != 0) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = EINVAL;
		} else {
			bp->bio_resid -= nbp->b_bcount;
		}
		nbp->b_caller1 = NULL;
		--(*count);
		if (bp->bio_flags & BIO_FLAG1) {
			bp->bio_flags &= ~BIO_FLAG1;
			wakeup(bp);
		}
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 * getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
Errors are 2022e4057dbdSPoul-Henning Kamp * automatically propagated to the parent 2023e4057dbdSPoul-Henning Kamp */ 2024e4057dbdSPoul-Henning Kamp 2025e4057dbdSPoul-Henning Kamp struct buf * 20260b441832SPoul-Henning Kamp getchainbuf(struct bio *bp, struct vnode *vp, int flags) 2027e4057dbdSPoul-Henning Kamp { 2028e4057dbdSPoul-Henning Kamp struct buf *nbp = getpbuf(NULL); 20290b441832SPoul-Henning Kamp u_int *count = (u_int *)&(bp->bio_caller1); 2030e4057dbdSPoul-Henning Kamp 20310b441832SPoul-Henning Kamp nbp->b_caller1 = bp; 20320b441832SPoul-Henning Kamp ++(*count); 2033e4057dbdSPoul-Henning Kamp 20340b441832SPoul-Henning Kamp if (*count > 4) 2035e4057dbdSPoul-Henning Kamp waitchainbuf(bp, 4, 0); 2036e4057dbdSPoul-Henning Kamp 20370b441832SPoul-Henning Kamp nbp->b_iocmd = bp->bio_cmd; 20380b441832SPoul-Henning Kamp nbp->b_ioflags = bp->bio_flags & BIO_ORDERED; 2039e4057dbdSPoul-Henning Kamp nbp->b_flags = flags; 2040e4057dbdSPoul-Henning Kamp nbp->b_rcred = nbp->b_wcred = proc0.p_ucred; 2041e4057dbdSPoul-Henning Kamp nbp->b_iodone = vm_pager_chain_iodone; 2042e4057dbdSPoul-Henning Kamp 2043e4057dbdSPoul-Henning Kamp crhold(nbp->b_rcred); 2044e4057dbdSPoul-Henning Kamp crhold(nbp->b_wcred); 2045e4057dbdSPoul-Henning Kamp 2046e4057dbdSPoul-Henning Kamp if (vp) 2047e4057dbdSPoul-Henning Kamp pbgetvp(vp, nbp); 2048e4057dbdSPoul-Henning Kamp return(nbp); 2049e4057dbdSPoul-Henning Kamp } 2050e4057dbdSPoul-Henning Kamp 2051e4057dbdSPoul-Henning Kamp void 2052e4057dbdSPoul-Henning Kamp flushchainbuf(struct buf *nbp) 2053e4057dbdSPoul-Henning Kamp { 2054e4057dbdSPoul-Henning Kamp if (nbp->b_bcount) { 2055e4057dbdSPoul-Henning Kamp nbp->b_bufsize = nbp->b_bcount; 2056e4057dbdSPoul-Henning Kamp if (nbp->b_iocmd == BIO_WRITE) 2057e4057dbdSPoul-Henning Kamp nbp->b_dirtyend = nbp->b_bcount; 2058e4057dbdSPoul-Henning Kamp BUF_KERNPROC(nbp); 2059e4057dbdSPoul-Henning Kamp BUF_STRATEGY(nbp); 2060e4057dbdSPoul-Henning Kamp } else { 2061e4057dbdSPoul-Henning Kamp bufdone(nbp); 2062e4057dbdSPoul-Henning Kamp } 2063e4057dbdSPoul-Henning Kamp } 2064e4057dbdSPoul-Henning Kamp 2065e4057dbdSPoul-Henning Kamp void 20660b441832SPoul-Henning Kamp waitchainbuf(struct bio *bp, int limit, int done) 2067e4057dbdSPoul-Henning Kamp { 2068e4057dbdSPoul-Henning Kamp int s; 20690b441832SPoul-Henning Kamp u_int *count = (u_int *)&(bp->bio_caller1); 2070e4057dbdSPoul-Henning Kamp 2071e4057dbdSPoul-Henning Kamp s = splbio(); 20720b441832SPoul-Henning Kamp while (*count > limit) { 20730b441832SPoul-Henning Kamp bp->bio_flags |= BIO_FLAG1; 2074e4057dbdSPoul-Henning Kamp tsleep(bp, PRIBIO + 4, "bpchain", 0); 2075e4057dbdSPoul-Henning Kamp } 2076e4057dbdSPoul-Henning Kamp if (done) { 20770b441832SPoul-Henning Kamp if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) { 20780b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 20790b441832SPoul-Henning Kamp bp->bio_error = EINVAL; 2080e4057dbdSPoul-Henning Kamp } 20810b441832SPoul-Henning Kamp biodone(bp); 2082e4057dbdSPoul-Henning Kamp } 2083e4057dbdSPoul-Henning Kamp splx(s); 2084e4057dbdSPoul-Henning Kamp } 2085e4057dbdSPoul-Henning Kamp 2086