/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *                              New Swap System
 *                              Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *      - The new swapper uses the new radix bitmap code.  This should scale
 *        to arbitrarily small or arbitrarily large swap spaces and an almost
 *        arbitrary degree of fragmentation.
 *
 * Features:
 *
 *      - on the fly reallocation of swap during putpages.  The new system
 *        does not try to keep previously allocated swap blocks for dirty
 *        pages.
 *
 *      - on the fly deallocation of swap
 *
 *      - No more garbage collection required.  Unnecessarily allocated swap
 *        blocks only exist for dirty vm_page_t's now and these are already
 *        cycled (in a high-load system) by the pager.  We also do on-the-fly
 *        removal of invalidated swap blocks when a page is destroyed
 *        or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *      @(#)swap_pager.c        8.9 (Berkeley) 3/21/94
 *
 * $Id: swap_pager.c,v 1.123 1999/08/17 05:56:00 alc Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES      MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define SWM_FREE        0x02    /* free, period                 */
#define SWM_POP         0x04    /* pop out                      */

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;        /* number of free swap blocks, in pages */

int swap_pager_full;            /* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;          /* free read buffers                    */
static int nsw_wcount_sync;     /* limit write buffers / synchronous    */
static int nsw_wcount_async;    /* limit write buffers / asynchronous   */
static int nsw_wcount_async_max;/* assigned maximum                     */
static int nsw_cluster_max;     /* maximum VOP I/O allowed              */
static int sw_alloc_interlock;  /* swap pager allocation interlock      */

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;  /* maximum in-progress async I/O's      */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
        CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
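
/*
 * Example ( for illustration ): because the knob above is registered
 * read-write under the vm tree, the sysop can tune it at run time:
 *
 *      sysctl -w vm.swap_async_max=8
 *
 * swap_pager_putpages() folds the new value into nsw_wcount_async_max
 * the next time it runs ( see Step 2 there ).
 */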

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS               8

#define NOBJLIST(handle)        \
        (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst  swap_pager_object_list[NOBJLISTS];
struct pagerlst         swap_pager_un_object_list;
vm_zone_t               swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
        swap_pager_alloc __P((void *handle, vm_ooffset_t size,
                              vm_prot_t prot, vm_ooffset_t offset));
static void     swap_pager_dealloc __P((vm_object_t object));
static int      swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void     swap_pager_init __P((void));
static void     swap_pager_unswapped __P((vm_page_t));
static void     swap_pager_strategy __P((vm_object_t, struct buf *));

struct pagerops swappagerops = {
        swap_pager_init,        /* early system initialization of pager */
        swap_pager_alloc,       /* allocate an OBJT_SWAP object         */
        swap_pager_dealloc,     /* deallocate an OBJT_SWAP object       */
        swap_pager_getpages,    /* pagein                               */
        swap_pager_putpages,    /* pageout                              */
        swap_pager_haspage,     /* get backing store status for page    */
        swap_pager_unswapped,   /* remove swap related to page          */
        swap_pager_strategy     /* pager strategy call                  */
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;          /* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;          /* in pages, swap_pager_almost_full warn */

static __inline void    swp_sizecheck __P((void));
static void     swp_pager_sync_iodone __P((struct buf *bp));
static void     swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */

static __inline void    swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */

static void swp_pager_meta_build __P((vm_object_t, daddr_t, daddr_t, int));
static void swp_pager_meta_free __P((vm_object_t, daddr_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -    update swap_pager_full indication
 *
 *      update the swap_pager_almost_full indication and warn when we are
 *      about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *      Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *      No restrictions on call
 *      This routine may not block.
 *      This routine must be called at splvm()
 */

static __inline void
swp_sizecheck()
{
        if (vm_swap_size < nswap_lowat) {
                if (swap_pager_almost_full == 0) {
                        printf("swap_pager: out of swap space\n");
                        swap_pager_almost_full = 1;
                }
        } else {
                swap_pager_full = 0;
                if (vm_swap_size > nswap_hiwat)
                        swap_pager_almost_full = 0;
        }
}

/*
 * SWAP_PAGER_INIT() -  initialize the swap pager!
 *
 *      Expected to be started from system init.  NOTE:  This code is run
 *      before much else so be careful what you depend on.  Most of the VM
 *      system has yet to be initialized at this point.
 */

static void
swap_pager_init()
{
        int i;

        /*
         * Initialize object lists
         */
        for (i = 0; i < NOBJLISTS; ++i)
                TAILQ_INIT(&swap_pager_object_list[i]);
        TAILQ_INIT(&swap_pager_un_object_list);

        /*
         * Device Stripe, in PAGE_SIZE'd blocks
         */

        dmmax = SWB_NPAGES * 2;
        dmmax_mask = ~(dmmax - 1);
}
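
/*
 * Worked example ( for illustration ): with the default SWB_NPAGES of 16,
 * dmmax is 32 pages and dmmax_mask is ~31.  Two block numbers then lie in
 * the same device stripe exactly when they agree in all bits above bit 4,
 * i.e. when ((blk1 ^ blk2) & dmmax_mask) == 0 -- the test used by the
 * clustering code in getpages and by the strategy code below.
 */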

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *      Expected to be started from pageout process once, prior to entering
 *      its main loop.
 */

void
swap_pager_swap_init()
{
        int n;

        /*
         * Number of in-transit swap bp operations.  Don't
         * exhaust the pbufs completely.  Make sure we
         * initialize workable values (0 will work for hysteresis
         * but it isn't very efficient).
         *
         * The nsw_cluster_max is constrained by the bp->b_pages[]
         * array (MAXPHYS/PAGE_SIZE) and our locally defined
         * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
         * constrained by the swap device interleave stripe size.
         *
         * Currently we hardwire nsw_wcount_async to 4.  This limit is
         * designed to prevent other I/O from having high latencies due to
         * our pageout I/O.  The value 4 works well for one or two active swap
         * devices but is probably a little low if you have more.  Even so,
         * a higher value would probably generate only a limited improvement
         * with three or four active swap devices since the system does not
         * typically have to pageout at extreme bandwidths.  We will want
         * at least 2 per swap device, and 4 is a pretty good value if you
         * have one NFS swap device due to the command/ack latency over NFS.
         * So it all works out pretty well.
         */

        nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

        nsw_rcount = (nswbuf + 1) / 2;
        nsw_wcount_sync = (nswbuf + 3) / 4;
        nsw_wcount_async = 4;
        nsw_wcount_async_max = nsw_wcount_async;

        /*
         * Initialize our zone.  Right now I'm just guessing on the number
         * we need based on the number of pages in the system.  Each swblock
         * can hold 16 pages, so this is probably overkill.
         */

        n = cnt.v_page_count * 2;

        swap_zone = zinit("SWAPMETA", sizeof(struct swblock), n,
            ZONE_INTERRUPT, 1);

        /*
         * Initialize our meta-data hash table.  The swapper does not need to
         * be quite as efficient as the VM system, so we do not use an
         * oversized hash table.
         *
         *      n:              size of hash table, must be power of 2
         *      swhash_mask:    hash table index mask
         */

        for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
                ;

        swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
        bzero(swhash, sizeof(struct swblock *) * n);

        swhash_mask = n - 1;
}
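
/*
 * Worked example ( for illustration ): on a machine with 8192 pages the
 * sizing loop above leaves n at 2048 -- the smallest power of 2 that is
 * at least cnt.v_page_count / 4 -- giving a 2048-bucket table and a
 * swhash_mask of 2047.
 */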

/*
 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
 *                      its metadata structures.
 *
 *      This routine is called from the mmap and fork code to create a new
 *      OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *      and then converting it with swp_pager_meta_build().
 *
 *      This routine may block in vm_object_allocate() and create a named
 *      object lookup race, so we must interlock.   We must also run at
 *      splvm() for the object lookup to handle races with interrupts, but
 *      we do not have to maintain splvm() in between the lookup and the
 *      add because (I believe) it is not possible to attempt to create
 *      a new swap object w/handle when a default object with that handle
 *      already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
                 vm_ooffset_t offset)
{
        vm_object_t object;

        if (handle) {
                /*
                 * Reference existing named region or allocate new one.  There
                 * should not be a race here against swp_pager_meta_build()
                 * as called from vm_page_remove() in regards to the lookup
                 * of the handle.
                 */

                while (sw_alloc_interlock) {
                        sw_alloc_interlock = -1;
                        tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
                }
                sw_alloc_interlock = 1;

                object = vm_pager_object_lookup(NOBJLIST(handle), handle);

                if (object != NULL) {
                        vm_object_reference(object);
                } else {
                        object = vm_object_allocate(OBJT_DEFAULT,
                                OFF_TO_IDX(offset + PAGE_MASK + size));
                        object->handle = handle;

                        swp_pager_meta_build(object, 0, SWAPBLK_NONE, 0);
                }

                if (sw_alloc_interlock < 0)
                        wakeup(&sw_alloc_interlock);

                sw_alloc_interlock = 0;
        } else {
                object = vm_object_allocate(OBJT_DEFAULT,
                        OFF_TO_IDX(offset + PAGE_MASK + size));

                swp_pager_meta_build(object, 0, SWAPBLK_NONE, 0);
        }

        return (object);
}
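
/*
 * Illustrative summary of the interlock above: sw_alloc_interlock is 0
 * when free, 1 when held with no waiters, and -1 when held with at least
 * one sleeper.  A contender sets it to -1 before sleeping, so the holder
 * knows a wakeup() is required exactly when it sees a negative value at
 * release time.
 */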

/*
 * SWAP_PAGER_DEALLOC() -       remove swap metadata from object
 *
 *      The swap backing for the object is destroyed.  The code is
 *      designed such that we can reinstantiate it later, but this
 *      routine is typically called only when the entire object is
 *      about to be destroyed.
 *
 *      This routine used to block, but no longer does.
 *
 *      The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(object)
        vm_object_t object;
{
        /*
         * Remove from list right away so lookups will fail if we block for
         * pageout completion.
         */

        if (object->handle == NULL) {
                TAILQ_REMOVE(&swap_pager_un_object_list, object,
                    pager_object_list);
        } else {
                TAILQ_REMOVE(NOBJLIST(object->handle), object,
                    pager_object_list);
        }

        vm_object_pip_wait(object, "swpdea");

        /*
         * Free all remaining metadata.  We only bother to free it from
         * the swap meta data.  We do not attempt to free swapblk's still
         * associated with vm_page_t's for this object.  We do not care
         * if paging is still in progress on some objects.
         */

        swp_pager_meta_free_all(object);
}

/************************************************************************
 *                      SWAP PAGER BITMAP ROUTINES                      *
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -   allocate raw swap space
 *
 *      Allocate swap for the requested number of pages.  The starting
 *      swap block number (a page index) is returned or SWAPBLK_NONE
 *      if the allocation failed.
 *
 *      Also has the side effect of advising that somebody made a mistake
 *      when they configured swap and didn't configure enough.
 *
 *      Must be called at splvm() to avoid races with bitmap frees from
 *      vm_page_remove() aka swap_pager_page_removed().
 *
 *      This routine may not block
 *      This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(npages)
        int npages;
{
        daddr_t blk;

        if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
                if (swap_pager_full != 2) {
                        printf("swap_pager_getswapspace: failed\n");
                        swap_pager_full = 2;
                        swap_pager_almost_full = 1;
                }
        } else {
                vm_swap_size -= npages;
                swp_sizecheck();
        }
        return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -  free raw swap space
 *
 *      This routine returns the specified swap blocks back to the bitmap.
 *
 *      Note:  This routine may not block (it could in the old swap code),
 *      and through the use of the new blist routines it does not block.
 *
 *      We must be called at splvm() to avoid races with bitmap frees from
 *      vm_page_remove() aka swap_pager_page_removed().
 *
 *      This routine may not block
 *      This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(blk, npages)
        daddr_t blk;
        int npages;
{
        blist_free(swapblist, blk, npages);
        vm_swap_size += npages;
        swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -     frees swap blocks associated with a page
 *                              range within an object.
 *
 *      This is a globally accessible routine.
 *
 *      This routine removes swapblk assignments from swap metadata.
 *
 *      The external callers of this routine typically have already destroyed
 *      or renamed vm_page_t's associated with this range in the object so
 *      we should be ok.
 */

void
swap_pager_freespace(object, start, size)
        vm_object_t object;
        vm_pindex_t start;
        vm_size_t size;
{
        swp_pager_meta_free(object, start, size);
}
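
/*
 * Usage sketch ( illustrative only, never compiled ): the two inline
 * bitmap routines above are symmetric and both require splvm() protection.
 */
#if 0
        {
                int s = splvm();
                daddr_t blk = swp_pager_getswapspace(4);

                if (blk != SWAPBLK_NONE)        /* got a 4 page run */
                        swp_pager_freeswapspace(blk, 4);
                splx(s);
        }
#endif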

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *                      and destroy the source.
 *
 *      Copy any valid swapblks from the source to the destination.  In
 *      cases where both the source and destination have a valid swapblk,
 *      we keep the destination's.
 *
 *      This routine is allowed to block.  It may block allocating metadata
 *      indirectly through swp_pager_meta_build() or if paging is still in
 *      progress on the source.
 *
 *      XXX vm_page_collapse() kinda expects us not to block because we
 *      supposedly do not need to allocate memory, but for the moment we
 *      *may* have to get a little memory from the zone allocator, but
 *      it is taken from the interrupt memory.  We should be ok.
 *
 *      The source object contains no vm_page_t's (which is just as well)
 *
 *      The source object is of type OBJT_SWAP.
 *
 *      The source and destination objects must be locked or
 *      inaccessible (XXX are they ?)
 */

void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
        vm_object_t srcobject;
        vm_object_t dstobject;
        vm_pindex_t offset;
        int destroysource;
{
        vm_pindex_t i;

        /*
         * If destroysource is set, we remove the source object from the
         * swap_pager internal queue now.
         */

        if (destroysource) {
                if (srcobject->handle == NULL) {
                        TAILQ_REMOVE(&swap_pager_un_object_list, srcobject,
                            pager_object_list);
                } else {
                        TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
                            pager_object_list);
                }
        }

        /*
         * transfer source to destination.
         */

        for (i = 0; i < dstobject->size; ++i) {
                daddr_t dstaddr;

                /*
                 * Locate (without changing) the swapblk on the destination,
                 * unless it is invalid in which case free it silently, or
                 * if the destination is a resident page, in which case the
                 * source is thrown away.
                 */

                dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

                if (dstaddr == SWAPBLK_NONE) {
                        /*
                         * Destination has no swapblk and is not resident,
                         * copy source.
                         */
                        daddr_t srcaddr;

                        srcaddr = swp_pager_meta_ctl(srcobject, i + offset,
                            SWM_POP);

                        if (srcaddr != SWAPBLK_NONE)
                                swp_pager_meta_build(dstobject, i, srcaddr, 1);
                } else {
                        /*
                         * Destination has valid swapblk or it is represented
                         * by a resident page.  We destroy the source block.
                         */

                        swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
                }
        }

        /*
         * Free left over swap blocks in source.
         *
         * We have to revert the type to OBJT_DEFAULT so we do not accidentally
         * double-remove the object from the swap queues.
         */

        if (destroysource) {
                swp_pager_meta_free_all(srcobject);
                /*
                 * Reverting the type is not necessary, the caller is going
                 * to destroy srcobject directly, but I'm doing it here
                 * for consistency since we've removed the object from its
                 * queues.
                 */
                srcobject->type = OBJT_DEFAULT;
        }
        return;
}
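
/*
 * Decision table for the copy loop above ( illustrative ):
 *
 *      destination swapblk     source swapblk          action
 *      -------------------     --------------          ------
 *      SWAPBLK_NONE            valid                   SWM_POP source into dest
 *      SWAPBLK_NONE            SWAPBLK_NONE            nothing to do
 *      valid or resident       anything                SWM_FREE the source
 */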

/*
 * SWAP_PAGER_HASPAGE() -       determine if we have good backing store for
 *                              the requested page.
 *
 *      We determine whether good backing store exists for the requested
 *      page and return TRUE if it does, FALSE if it doesn't.
 *
 *      If TRUE, we also try to determine how much valid, contiguous backing
 *      store exists before and after the requested page within a reasonable
 *      distance.  We do not try to restrict it to the swap device stripe
 *      (that is handled in getpages/putpages).  It probably isn't worth
 *      doing here.
 */

boolean_t
swap_pager_haspage(object, pindex, before, after)
        vm_object_t object;
        vm_pindex_t pindex;
        int *before;
        int *after;
{
        daddr_t blk0;

        /*
         * do we have good backing store at the requested index ?
         */

        blk0 = swp_pager_meta_ctl(object, pindex, 0);

        if (blk0 & SWAPBLK_NONE) {
                if (before)
                        *before = 0;
                if (after)
                        *after = 0;
                return (FALSE);
        }

        /*
         * find backwards-looking contiguous good backing store
         */

        if (before != NULL) {
                int i;

                for (i = 1; i < (SWB_NPAGES/2); ++i) {
                        daddr_t blk;

                        if (i > pindex)
                                break;
                        blk = swp_pager_meta_ctl(object, pindex - i, 0);
                        if (blk & SWAPBLK_NONE)
                                break;
                        if (blk != blk0 - i)
                                break;
                }
                *before = (i - 1);
        }

        /*
         * find forward-looking contiguous good backing store
         */

        if (after != NULL) {
                int i;

                for (i = 1; i < (SWB_NPAGES/2); ++i) {
                        daddr_t blk;

                        blk = swp_pager_meta_ctl(object, pindex + i, 0);
                        if (blk & SWAPBLK_NONE)
                                break;
                        if (blk != blk0 + i)
                                break;
                }
                *after = (i - 1);
        }

        return (TRUE);
}
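
/*
 * Worked example ( for illustration ): if pindex 5 maps to swapblk 1000
 * and pindexes 4 and 6 map to 999 and 1001, the scans above report
 * *before >= 1 and *after >= 1.  Each scan stops at the first hole
 * ( SWAPBLK_NONE ) or discontiguity and never looks more than
 * SWB_NPAGES/2 - 1 pages away.
 */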

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *      This removes any associated swap backing store, whether valid or
 *      not, from the page.
 *
 *      This routine is typically called when a page is made dirty, at
 *      which point any associated swap can be freed.  MADV_FREE also
 *      calls us in a special-case situation
 *
 *      NOTE!!!  If the page is clean and the swap was valid, the caller
 *      should make the page dirty before calling this routine.  This routine
 *      does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *      depends on it.
 *
 *      This routine may not block
 */

static void
swap_pager_unswapped(m)
        vm_page_t m;
{
        swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *      This implements the vm_pager_strategy() interface to swap and allows
 *      other parts of the system to directly access swap as backing store
 *      through vm_objects of type OBJT_SWAP.  This is intended to be a
 *      cacheless interface ( i.e. caching occurs at higher levels ).
 *      Therefore we do not maintain any resident pages.  All I/O goes
 *      directly from and to the swap device.
 *
 *      Note that b_blkno is scaled for PAGE_SIZE
 *
 *      We currently attempt to run I/O synchronously or asynchronously as
 *      the caller requests.  This isn't perfect because we lose error
 *      sequencing when we run multiple ops in parallel to satisfy a request.
 *      But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct buf *bp)
{
        vm_pindex_t start;
        int count;
        char *data;
        struct buf *nbp = NULL;

        if (bp->b_bcount & PAGE_MASK) {
                bp->b_error = EINVAL;
                bp->b_flags |= B_ERROR | B_INVAL;
                biodone(bp);
                printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, "
                    "not page bounded\n",
                    bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
                return;
        }

        /*
         * Clear error indication, initialize page index, count, data pointer.
         */

        bp->b_error = 0;
        bp->b_flags &= ~B_ERROR;
        bp->b_resid = bp->b_bcount;

        start = bp->b_pblkno;
        count = howmany(bp->b_bcount, PAGE_SIZE);
        data = bp->b_data;

        /*
         * Execute strategy function
         */

        if (bp->b_flags & B_FREEBUF) {
                /*
                 * FREE PAGE(s) - destroy underlying swap that is no longer
                 *                needed.
                 */
                int s;

                s = splvm();
                swp_pager_meta_free(object, start, count);
                splx(s);
                bp->b_resid = 0;
        } else if (bp->b_flags & B_READ) {
                /*
                 * READ FROM SWAP - read directly from swap backing store,
                 * zero-fill as appropriate.
                 *
                 * Note: the count == 0 case is beyond the end of the
                 * buffer.  This is a special case to close out any
                 * left over nbp.
                 */

                while (count > 0) {
                        daddr_t blk;
                        int s;

                        s = splvm();
                        blk = swp_pager_meta_ctl(object, start, 0);
                        splx(s);

                        /*
                         * Do we have to flush our current collection?
                         */

                        if (nbp && ((blk & SWAPBLK_NONE) ||
                            nbp->b_blkno + btoc(nbp->b_bcount) != blk)) {
                                ++cnt.v_swapin;
                                cnt.v_swappgsin += btoc(nbp->b_bcount);
                                flushchainbuf(nbp);
                                nbp = NULL;
                        }

                        /*
                         * Add to collection
                         */
                        if (blk & SWAPBLK_NONE) {
                                s = splbio();
                                bp->b_resid -= PAGE_SIZE;
                                splx(s);
                                bzero(data, PAGE_SIZE);
                        } else {
                                if (nbp == NULL) {
                                        nbp = getchainbuf(bp, swapdev_vp,
                                            B_READ|B_ASYNC);
                                        nbp->b_blkno = blk;
                                        nbp->b_data = data;
                                }
                                nbp->b_bcount += PAGE_SIZE;
                        }
                        --count;
                        ++start;
                        data += PAGE_SIZE;
                }
        } else {
                /*
                 * WRITE TO SWAP - [re]allocate swap and write.
                 */
                while (count > 0) {
                        int i;
                        int s;
                        int n;
                        daddr_t blk;

                        n = min(count, BLIST_MAX_ALLOC);
                        n = min(n, nsw_cluster_max);

                        s = splvm();
                        for (;;) {
                                blk = swp_pager_getswapspace(n);
                                if (blk != SWAPBLK_NONE)
                                        break;
                                n >>= 1;
                                if (n == 0)
                                        break;
                        }
                        if (n == 0) {
                                bp->b_error = ENOMEM;
                                bp->b_flags |= B_ERROR;
                                splx(s);
                                break;
                        }

                        /*
                         * Oops, too big if it crosses a stripe
                         *
                         * 1111000000
                         *     111111
                         *      1000001
                         */
                        if ((blk ^ (blk + n)) & dmmax_mask) {
                                int j = ((blk + dmmax) & dmmax_mask) - blk;
                                swp_pager_freeswapspace(blk + j, n - j);
                                n = j;
                        }

                        swp_pager_meta_free(object, start, n);

                        splx(s);

                        if (nbp) {
                                ++cnt.v_swapout;
                                cnt.v_swappgsout += btoc(nbp->b_bcount);
                                flushchainbuf(nbp);
                        }

                        nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);

                        nbp->b_blkno = blk;
                        nbp->b_data = data;
                        nbp->b_bcount = PAGE_SIZE * n;

                        /*
                         * Must set dirty range for NFS to work.  dirtybeg &
                         * off are already 0.
                         */
                        nbp->b_dirtyend = nbp->b_bcount;

                        ++cnt.v_swapout;
                        cnt.v_swappgsout += n;

                        s = splbio();
                        for (i = 0; i < n; ++i) {
                                swp_pager_meta_build(object, start + i,
                                    blk + i, 1);
                        }
                        splx(s);

                        count -= n;
                        start += n;
                        data += PAGE_SIZE * n;
                }
        }
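
        /*
         * Worked example for the stripe trim above ( illustrative ):
         * with dmmax = 32, a 6 page allocation starting at blk 30 would
         * cross the stripe boundary at 32.  j = ((30 + 32) & ~31) - 30 = 2,
         * so pages 32-35 are returned to the bitmap and the write is
         * trimmed to the 2 pages that fit in the current stripe.
         */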

        /*
         * Cleanup.  Commit last nbp either async or sync, and either
         * wait for it synchronously or make it auto-biodone itself and
         * the parent bp.
         */

        if (nbp) {
                if ((bp->b_flags & B_ASYNC) == 0)
                        nbp->b_flags &= ~B_ASYNC;
                if (nbp->b_flags & B_READ) {
                        ++cnt.v_swapin;
                        cnt.v_swappgsin += btoc(nbp->b_bcount);
                } else {
                        ++cnt.v_swapout;
                        cnt.v_swappgsout += btoc(nbp->b_bcount);
                }
                flushchainbuf(nbp);
        }
        if (bp->b_flags & B_ASYNC) {
                autochaindone(bp);
        } else {
                waitchainbuf(bp, 0, 1);
        }
}
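
/*
 * Interface sketch ( illustrative ): a vm_pager_strategy() client hands
 * the routine above an OBJT_SWAP object and a buf where
 *
 *      b_pblkno        page index into the object
 *      b_bcount        byte count, must be page-bounded
 *      b_data          caller's buffer
 *      b_flags         B_FREEBUF to release backing store, B_READ to
 *                      read it back, neither to (re)write it, plus
 *                      B_ASYNC for asynchronous completion
 *
 * On failure the routine marks the buf B_ERROR and sets b_error
 * ( e.g. ENOMEM when swap is exhausted ).
 */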

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *      Attempt to retrieve (m, count) pages from backing store, but make
 *      sure we retrieve at least m[reqpage].  We try to load in as large
 *      a chunk surrounding m[reqpage] as is contiguous in swap and which
 *      belongs to the same object.
 *
 *      The code is designed for asynchronous operation and
 *      immediate-notification of 'reqpage' but tends not to be
 *      used that way.  Please do not optimize-out this algorithmic
 *      feature, I intend to improve on it in the future.
 *
 *      The parent has a single vm_object_pip_add() reference prior to
 *      calling us and we should return with the same.
 *
 *      The parent has BUSY'd the pages.  We should return with 'm'
 *      left busy, but the others adjusted.
 */

static int
swap_pager_getpages(object, m, count, reqpage)
        vm_object_t object;
        vm_page_t *m;
        int count, reqpage;
{
        struct buf *bp;
        vm_page_t mreq;
        int s;
        int i;
        int j;
        daddr_t blk;
        vm_offset_t kva;
        vm_pindex_t lastpindex;

        mreq = m[reqpage];

#if !defined(MAX_PERF)
        if (mreq->object != object) {
                panic("swap_pager_getpages: object mismatch %p/%p",
                    object, mreq->object);
        }
#endif
        /*
         * Calculate range to retrieve.  The pages have already been assigned
         * their swapblks.  We require a *contiguous* range that falls entirely
         * within a single device stripe.   If we do not supply it, bad things
         * happen.
         */

        blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

        for (i = reqpage - 1; i >= 0; --i) {
                daddr_t iblk;

                iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
                if (iblk & SWAPBLK_NONE)
                        break;

                if ((blk ^ iblk) & dmmax_mask)
                        break;

                if (blk != iblk + (reqpage - i))
                        break;
        }
        ++i;

        for (j = reqpage + 1; j < count; ++j) {
                daddr_t jblk;

                jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
                if (jblk & SWAPBLK_NONE)
                        break;

                if ((blk ^ jblk) & dmmax_mask)
                        break;

                if (blk != jblk - (j - reqpage))
                        break;
        }

        /*
         * If blk itself is bad, well, we can't do any I/O.  This should
         * already be covered as a side effect, but I'm making sure.
         */

        if (blk & SWAPBLK_NONE) {
                i = reqpage;
                j = reqpage + 1;
        }

        /*
         * free pages outside our collection range.  Note: we never free
         * mreq, it must remain busy throughout.
         */

        {
                int k;

                for (k = 0; k < i; ++k) {
                        vm_page_free(m[k]);
                }
                for (k = j; k < count; ++k) {
                        vm_page_free(m[k]);
                }
        }

        /*
         * Return VM_PAGER_FAIL if we have nothing
         * to do.  Return mreq still busy, but the
         * others unbusied.
         */

        if (blk & SWAPBLK_NONE)
                return(VM_PAGER_FAIL);
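
        /*
         * Worked example of the range calculation above ( illustrative ):
         * with count = 8, reqpage = 3 and the requested page at swapblk
         * 1003, neighboring pages at 1001, 1002 and 1004 extend the cluster
         * in both directions.  The scans leave [i, j) covering every page
         * whose swapblk is contiguous with blk and in the same stripe, and
         * the pages outside [i, j) have just been freed.
         */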

        /*
         * Get a swap buffer header to perform the IO
         */

        bp = getpbuf(&nsw_rcount);
        kva = (vm_offset_t) bp->b_data;

        /*
         * map our page(s) into kva for input
         *
         * NOTE: B_PAGING is set by pbgetvp()
         */

        pmap_qenter(kva, m + i, j - i);

        bp->b_flags = B_READ | B_CALL;
        bp->b_iodone = swp_pager_async_iodone;
        bp->b_rcred = bp->b_wcred = proc0.p_ucred;
        bp->b_data = (caddr_t) kva;
        crhold(bp->b_rcred);
        crhold(bp->b_wcred);
        /*
         * b_blkno is in page-sized chunks.  swapblk is valid, too, so
         * we don't have to mask it against SWAPBLK_MASK.
         */
        bp->b_blkno = blk - (reqpage - i);
        bp->b_bcount = PAGE_SIZE * (j - i);
        bp->b_bufsize = PAGE_SIZE * (j - i);
        bp->b_pager.pg_reqpage = reqpage - i;

        {
                int k;

                for (k = i; k < j; ++k) {
                        bp->b_pages[k - i] = m[k];
                        vm_page_flag_set(m[k], PG_SWAPINPROG);
                }
        }
        bp->b_npages = j - i;

        pbgetvp(swapdev_vp, bp);

        cnt.v_swapin++;
        cnt.v_swappgsin += bp->b_npages;

        /*
         * We still hold the lock on mreq, and our automatic completion routine
         * does not remove it.
         */

        vm_object_pip_add(mreq->object, bp->b_npages);
        lastpindex = m[j-1]->pindex;

        /*
         * perform the I/O.  NOTE!!!  bp cannot be considered valid after
         * this point because we automatically release it on completion.
         * Instead, we look at the one page we are interested in which we
         * still hold a lock on even through the I/O completion.
         *
         * The other pages in our m[] array are also released on completion,
         * so we cannot assume they are valid anymore either.
         *
         * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
         */

        BUF_KERNPROC(bp);
        VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	s = splvm();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (tsleep(mreq, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
			    " %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    bp->b_bcount
			);
		}
	}

	splx(s);

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}
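
/*
 * A compiled-out userland analog of the wait loop above, assuming
 * POSIX threads (none of these names are kernel API): the waiter
 * sleeps on a condition until a completion routine clears an
 * in-progress flag, waking periodically to log a diagnostic much as
 * tsleep() does with its hz*20 timeout.
 */
#if 0
#include <stdio.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int in_progress = 1;		/* analog of PG_SWAPINPROG */

static void *
completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lk);
	in_progress = 0;		/* analog of clearing PG_SWAPINPROG */
	pthread_cond_broadcast(&cv);	/* analog of wakeup() */
	pthread_mutex_unlock(&lk);
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	struct timespec ts;

	pthread_create(&t, NULL, completer, NULL);
	pthread_mutex_lock(&lk);
	while (in_progress) {
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 20;	/* analog of the hz*20 tick limit */
		if (pthread_cond_timedwait(&cv, &lk, &ts) != 0)
			printf("indefinite wait\n");
	}
	pthread_mutex_unlock(&lk);
	pthread_join(t, NULL);
	return (0);
}
#endif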

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	int n = 0;

#if !defined(MAX_PERF)
	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
#endif
	/*
	 * Step 1
	 *
	 * Turn the object into an OBJT_SWAP object,
	 * check for bogus sysops,
	 * force sync if not the pageout process.
	 */

	if (object->type != OBJT_SWAP) {
		swp_pager_meta_build(object, 0, SWAPBLK_NONE, 0);
	}

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}
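
	/*
	 * Compiled-out sketch of the Step 2 rebalance with made-up
	 * numbers: the tunable is clamped first, and the delta between
	 * the new and old maximum is then applied to both the limit and
	 * the available count, but only when that cannot drive the
	 * available count negative.
	 */
#if 0
#include <stdio.h>

int
main(void)
{
	int nswbuf = 64;		/* hypothetical pbuf count */
	int tunable = 1000;		/* bogus sysctl input */
	int max = 4, avail = 1;		/* current limit and free slots */
	int n;

	if ((n = tunable) > nswbuf / 2)	/* clamp into [1, nswbuf/2] */
		n = nswbuf / 2;
	if (n < 1)
		n = 1;

	n -= max;			/* delta: 32 - 4 = 28 */
	if (avail + n >= 0) {		/* apply only if it fits */
		avail += n;
		max += n;
	}
	printf("max %d avail %d\n", max, avail);	/* "max 32 avail 29" */
	return (0);
}
#endif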

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		/*
		 * Get the biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j) {
				rtvals[i+j] = VM_PAGER_FAIL;
			}
			continue;
		}

		/*
		 * Oops, too big if it crosses a stripe:
		 *
		 * 1111000000
		 *     111111
		 *        1000001
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}
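
		/*
		 * Compiled-out sketch of the stripe trim above with a
		 * made-up dmmax: rounding blk up to the next stripe
		 * boundary yields the count j that fits below the
		 * boundary; the tail (n - j) goes back to the allocator.
		 */
#if 0
#include <stdio.h>

int
main(void)
{
	long dmmax = 0x2000;		/* hypothetical stripe size */
	long dmmax_mask = ~(dmmax - 1);
	long blk = 0x1ffc, n = 16, j;	/* allocation near a boundary */

	if ((blk ^ (blk + n)) & dmmax_mask) {
		j = ((blk + dmmax) & dmmax_mask) - blk;
		printf("keep %ld free back %ld\n", j, n - j);	/* 4 and 12 */
		n = j;
	}
	return (0);
}
#endif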

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
			bp->b_flags = B_CALL;
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_CALL | B_ASYNC;
		}
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		crhold(bp->b_rcred);
		crhold(bp->b_wcred);

		pbgetvp(swapdev_vp, bp);

		s = splvm();

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j,
			    0
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_pages[j] = mreq;
		}
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;
		swapdev_vp->v_numoutput++;

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			VOP_STRATEGY(bp->b_vp, bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;

			splx(s);
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		VOP_STRATEGY(bp->b_vp, bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, PVM, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);

		splx(s);
	}
}

/*
 * swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.
 */

static void
swp_pager_sync_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	WARNING!  This routine may be called from an interrupt.  We cannot
 *	mess with swap metadata unless we want to run all our other routines
 *	at splbio() too, which I'd rather not do.  We up ourselves
 *	to splvm() because we may call vm_page_free(), which can unlink a
 *	page from an object.
 *
 *	XXX currently I do not believe any object routines protect
 *	object->memq at splvm().  The code must be gone over to determine
 *	the actual state of the problem.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we unbusy
 *	(PG_BUSY) all pages except the 'main' request page.  For WRITE
 *	operations, we unbusy (vm_page_t->busy) all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio().
 */

static void
swp_pager_async_iodone(bp)
	register struct buf *bp;
{
	int s;
	int i;
	vm_object_t object = NULL;

	s = splvm();

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_flags & B_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object.
	 */

	if (bp->b_npages)
		object = bp->b_pages[0]->object;

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	/*
	 * Clean up pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_flags & B_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.
				 * ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX it may not be legal to free the page
				 * here as this messes with the object->memq's.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate the
				 * page so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_flags & B_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped, but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * Clear PG_ZERO in the page.
			 *
			 * If this is not the requested page, deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * someone could be waiting for it in getpages.
			 * However, be sure not to unbusy the page getpages
			 * specifically requested - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			vm_page_protect(m, VM_PROT_READ);
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			vm_page_undirty(m);
			vm_page_io_finish(m);
		}
	}

	/*
	 * Adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_flags & B_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.
 *
 *	In fact, we just have a few counters in the vm_object_t.  The
 *	metadata is actually stored in a hash table.
 */
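
/*
 * For reference, the shape assumed by the routines below (the real
 * declarations live in the swap pager headers, not here): each hash
 * entry covers SWAP_META_PAGES consecutive page indices of one object
 * and is chained through swb_hnext on hash collision.
 */
#if 0
struct swblock {
	struct swblock	*swb_hnext;	/* hash chain */
	vm_object_t	swb_object;	/* owning object */
	daddr_t		swb_index;	/* base page index, group aligned */
	int		swb_count;	/* valid slots in swb_pages[] */
	daddr_t		swb_pages[SWAP_META_PAGES]; /* SWAPBLK_NONE if unset */
};
#endif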

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer to
 *	the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, daddr_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
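
/*
 * The pointer-to-pointer idiom above, as a compiled-out standalone
 * sketch (struct node and find_slot are made up): walking the chain
 * through the address of each link hands the caller a slot it can
 * assign through, so unlinking (*pn = (*pn)->next) needs no special
 * case for the list head.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node	*next;
	int		key;
};

static struct node **
find_slot(struct node **head, int key)
{
	struct node **pn = head;
	struct node *n;

	while ((n = *pn) != NULL) {
		if (n->key == key)
			break;
		pn = &n->next;
	}
	return (pn);		/* the match, or a NULL link to insert at */
}

int
main(void)
{
	struct node b = { NULL, 2 };
	struct node a = { &b, 1 };
	struct node *head = &a;
	struct node **pn = find_slot(&head, 1);

	if (*pn != NULL)
		*pn = (*pn)->next;	/* unlink; works even for the head */
	printf("new head key %d\n", head->key);	/* "new head key 2" */
	return (0);
}
#endif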

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	daddr_t index,
	daddr_t swapblk,
	int waitok
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Wait for free memory when waitok is TRUE prior to calling the
	 * zone allocator.
	 */

	while (waitok && cnt.v_free_count == 0) {
		VM_WAIT;
	}

	/*
	 * If the swapblk being added is invalid, free the underlying
	 * block and record SWAPBLK_NONE instead.
	 */

	if (swapblk & SWAPBLK_NONE) {
		if (swapblk != SWAPBLK_NONE) {
			swp_pager_freeswapspace(
			    swapblk & SWAPBLK_MASK,
			    1
			);
			swapblk = SWAPBLK_NONE;
		}
	}

	/*
	 * Locate the hash entry.  If not found, create it; but if we
	 * aren't adding anything, just return.
	 */

	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);

		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(
		    swap->swb_pages[index] & SWAPBLK_MASK,
		    1
		);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	++swap->swb_count;
}
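
/*
 * Compiled-out sketch of the two-level indexing used above, assuming
 * a made-up group size of 16 (the real SWAP_META_PAGES is configured
 * elsewhere): the high bits of a page index select the swblock group
 * and the low bits select the slot within swb_pages[].
 */
#if 0
#include <stdio.h>

#define GROUP_PAGES	16			/* stand-in for SWAP_META_PAGES */
#define GROUP_MASK	(GROUP_PAGES - 1)	/* stand-in for SWAP_META_MASK */

int
main(void)
{
	long index = 37;

	printf("group base %ld slot %ld\n",
	    index & ~(long)GROUP_MASK,		/* 32 */
	    index & GROUP_MASK);		/* 5 */
	return (0);
}
#endif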

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm().
 */

static void
swp_pager_meta_free(vm_object_t object, daddr_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			daddr_t n = SWAP_META_PAGES - (index & SWAP_META_MASK);

			count -= n;
			index += n;
		}
	}
}
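
/*
 * Compiled-out sketch of the skip taken in the else branch above,
 * with the same made-up group size of 16: when no swblock exists for
 * a group, the scan jumps straight to the next group boundary instead
 * of stepping one index at a time.
 */
#if 0
#include <stdio.h>

#define GROUP_PAGES	16		/* stand-in for SWAP_META_PAGES */
#define GROUP_MASK	(GROUP_PAGES - 1)

int
main(void)
{
	long index = 37, count = 100, n;

	n = GROUP_PAGES - (index & GROUP_MASK);	/* 11 slots to the boundary */
	count -= n;
	index += n;
	printf("index %ld count %ld\n", index, count);	/* "index 48 count 89" */
	return (0);
}
#endif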

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];

				if (v != SWAPBLK_NONE) {
#if !defined(MAX_PERF)
					--swap->swb_count;
#endif
					swp_pager_freeswapspace(
					    v,
					    1
					);
				}
			}
#if !defined(MAX_PERF)
			if (swap->swb_count != 0)
				panic("swp_pager_meta_free_all: swb_count != 0");
#endif
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
#if !defined(MAX_PERF)
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
#endif
	}
}

/*
 * SWP_PAGER_META_CTL() - misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	/*
	 * The meta data only exists if the object is OBJT_SWAP,
	 * and even then it might not be allocated yet.
	 */

	if (
	    object->type != OBJT_SWAP ||
	    object->un_pager.swp.swp_bcount == 0
	) {
		return(SWAPBLK_NONE);
	}

	{
		struct swblock **pswap;
		struct swblock *swap;
		daddr_t r1 = SWAPBLK_NONE;

		pswap = swp_pager_hash(object, index);

		index &= SWAP_META_MASK;

		if ((swap = *pswap) != NULL) {
			r1 = swap->swb_pages[index];

			if (r1 != SWAPBLK_NONE) {
				if (flags & SWM_FREE) {
					swp_pager_freeswapspace(
					    r1,
					    1
					);
					r1 = SWAPBLK_NONE;
				}
				if (flags & (SWM_FREE|SWM_POP)) {
					swap->swb_pages[index] = SWAPBLK_NONE;
					if (--swap->swb_count == 0) {
						*pswap = swap->swb_hnext;
						zfree(swap_zone, swap);
						--object->un_pager.swp.swp_bcount;
					}
				}
			}
		}

		return(r1);
	}
	/* not reached */
}
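
/*
 * Compiled-out sketch of the three control modes above against a
 * one-slot model of swb_pages[] (the constants and the slot variable
 * are made up; SWAPBLK_NONE is modeled as -1 rather than a flag bit):
 * a plain lookup leaves the assignment in place, SWM_POP removes it
 * without freeing, and SWM_FREE removes it and gives the block back.
 */
#if 0
#include <stdio.h>

#define MODEL_NONE	(-1L)		/* stand-in for SWAPBLK_NONE */
#define SWM_FREE	0x02
#define SWM_POP		0x04

static long slot = 1234;		/* model of swap->swb_pages[index] */

static long
model_ctl(int flags)
{
	long r1 = slot;

	if (r1 != MODEL_NONE) {
		if (flags & SWM_FREE)
			r1 = MODEL_NONE;	/* block returned to the bitmap */
		if (flags & (SWM_FREE|SWM_POP))
			slot = MODEL_NONE;	/* assignment removed */
	}
	return (r1);
}

int
main(void)
{
	long r;

	r = model_ctl(0);
	printf("lookup %ld slot %ld\n", r, slot);	/* 1234, still assigned */
	r = model_ctl(SWM_POP);
	printf("pop    %ld slot %ld\n", r, slot);	/* 1234, now removed */
	r = model_ctl(SWM_FREE);
	printf("free   %ld slot %ld\n", r, slot);	/* nothing left to free */
	return (0);
}
#endif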