/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_swap.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * SWB_NPAGES must be a power of 2.  It may be set to 1, 2, 4, 8, or 16
 * pages per allocation.  We recommend you stick with the default of 8.
 * The 16-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES MAX_PAGEOUT_CLUSTER
#endif

/*
 * Piecemeal swap metadata structure.  Swap is stored in a radix tree.
 *
 * If SWB_NPAGES is 8 and sizeof(char *) == sizeof(daddr_t), our radix
 * is basically 8.  Assuming PAGE_SIZE == 4096, one tree level represents
 * 32K worth of data, two levels represent 256K, three levels represent
 * 2 MBytes.   This is acceptable.
 *
 * Overall memory utilization is about the same as the old swap structure.
 */
#define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t))
#define SWAP_META_PAGES		(SWB_NPAGES * 2)
#define SWAP_META_MASK		(SWAP_META_PAGES - 1)

typedef	int32_t	swblk_t;	/*
				 * swap offset.  This is the type used to
				 * address the "virtual swap device" and
				 * therefore the maximum swap space is
				 * 2^32 pages.
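				 * (With 4 KiB pages that corresponds to
				 * 16 TiB of addressable swap.)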
				 */

struct swdevt;
typedef void sw_strategy_t(struct buf *bp, struct swdevt *sw);
typedef void sw_close_t(struct thread *td, struct swdevt *sw);

/*
 * Swap device table
 */
struct swdevt {
	int	sw_flags;
	int	sw_nblks;
	int	sw_used;
	dev_t	sw_dev;
	struct vnode *sw_vp;
	void	*sw_id;
	swblk_t	sw_first;
	swblk_t	sw_end;
	struct blist *sw_blist;
	TAILQ_ENTRY(swdevt)	sw_list;
	sw_strategy_t		*sw_strategy;
	sw_close_t		*sw_close;
};

#define	SW_CLOSING	0x04

struct swblock {
	struct swblock	*swb_hnext;
	vm_object_t	swb_object;
	vm_pindex_t	swb_index;
	int		swb_count;
	daddr_t		swb_pages[SWAP_META_PAGES];
};

static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static int swdev_syscall_active = 0; /* serialize swap(on|off) */

static void swapdev_strategy(struct buf *, struct swdevt *sw);

#define SWM_FREE	0x02	/* free, period */
#define SWM_POP		0x04	/* pop out */

int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

static struct swblock **swhash;
static int swhash_mask;
static struct mtx swhash_mtx;

static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static struct sx sw_alloc_sx;


SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t	swap_zone;
static struct vm_object	swap_zone_obj;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc(void *handle, vm_ooffset_t size,
		    vm_prot_t prot, vm_ooffset_t offset);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t
		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp, int *sw_used);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager */
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	.pgo_getpages =	swap_pager_getpages,	/* pagein */
	.pgo_putpages =	swap_pager_putpages,	/* pageout */
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page */
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int dmmax;
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static int	swapongeom(struct thread *, struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, int npages);
static daddr_t	swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is a helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
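 *
 *	With the default SWAP_META_PAGES of 32, the low five bits of the
 *	page index are masked off before hashing, so every page covered
 *	by a single swblock lands on the same hash chain.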
 */
static struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~(vm_pindex_t)SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return (pswap);
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap devices, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = cnt.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;
	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
	do {
		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
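		 * For example, an initial request for 30000 entries would
		 * be retried as 20000, then 13333, then 8888, and so on.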
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (swap_zone == NULL)
		panic("failed to create swap_zone.");
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
	mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 *
 * MPSAFE
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
	vm_object_t object;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset + PAGE_MASK + size);

	if (handle) {
		mtx_lock(&Giant);
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT, pindex);
			object->handle = handle;

			VM_OBJECT_LOCK(object);
			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
			VM_OBJECT_UNLOCK(object);
		}
		sx_xunlock(&sw_alloc_sx);
		mtx_unlock(&Giant);
	} else {
		object = vm_object_allocate(OBJT_DEFAULT, pindex);

		VM_OBJECT_LOCK(object);
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		VM_OBJECT_UNLOCK(object);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	int s;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		mtx_lock(&sw_alloc_mtx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
		mtx_unlock(&sw_alloc_mtx);
	}

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int i;

	blk = SWAPBLK_NONE;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	for (i = 0; i < nswapdev; i++) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if (!(sp->sw_flags & SW_CLOSING)) {
			blk = blist_alloc(sp->sw_blist, npages);
			if (blk != SWAPBLK_NONE) {
				blk += sp->sw_first;
				sp->sw_used += npages;
				swap_pager_avail -= npages;
				swp_sizecheck();
				swdevhd = TAILQ_NEXT(sp, sw_list);
				goto done;
			}
		}
		sp = TAILQ_NEXT(sp, sw_list);
	}
	if (swap_pager_full != 2) {
		printf("swap_pager_getswapspace(%d): failed\n", npages);
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	swdevhd = NULL;
done:
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

static struct swdevt *
swp_pager_find_dev(daddr_t blk)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (blk >= sp->sw_first && blk < sp->sw_end) {
			mtx_unlock(&sw_dev_mtx);
			return (sp);
		}
	}
	panic("Swapdev not found");
}

static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
			mtx_unlock(&sw_dev_mtx);
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}


/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (blk >= sp->sw_first && blk < sp->sw_end) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s = splvm();

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	VM_OBJECT_LOCK(object);
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					VM_OBJECT_UNLOCK(object);
					splx(s);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	VM_OBJECT_UNLOCK(object);
	splx(s);
	return (0);
}

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;
	int s;

	VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);

	s = splvm();
	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		if (srcobject->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE) {
				/*
				 * swp_pager_meta_build() can sleep.
				 */
				vm_object_pip_add(srcobject, 1);
				VM_OBJECT_UNLOCK(srcobject);
				vm_object_pip_add(dstobject, 1);
				swp_pager_meta_build(dstobject, i, srcaddr);
				vm_object_pip_wakeup(dstobject);
				VM_OBJECT_LOCK(srcobject);
				vm_object_pip_wakeup(srcobject);
			}
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
{
	daddr_t blk0;
	int s;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * do we have good backing store at the requested index ?
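	 * (The before/after scans below stay within SWB_NPAGES/2 pages of
	 * the requested index in either direction.)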
	 */
	s = splvm();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		splx(s);
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	splx(s);
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */
static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	daddr_t blk;

	mreq = m[reqpage];

	KASSERT(mreq->object == object,
	    ("swap_pager_getpages: object mismatch %p/%p",
	    object, mreq->object));

	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range but we know it to
	 * not span devices.   If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
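	 *
	 * For example, if m[reqpage] maps to swap block 100, then m[reqpage-1]
	 * is kept only if it maps to block 99 and m[reqpage+1] only if it
	 * maps to block 101; everything else is trimmed from the cluster.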
	 */
	s = splvm();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
	}

	/*
	 * free pages outside our collection range.   Note: we never free
	 * mreq, it must remain busy throughout.
	 */
	vm_page_lock_queues();
	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	vm_page_unlock_queues();
	splx(s);


	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */
	if (blk == SWAPBLK_NONE)
		return (VM_PAGER_FAIL);

	/*
	 * Getpbuf() can sleep.
	 */
	VM_OBJECT_UNLOCK(object);
	/*
	 * Get a swap buffer header to perform the IO
	 */
	bp = getpbuf(&nsw_rcount);
	bp->b_flags |= B_PAGING;

	/*
	 * map our page(s) into kva for input
	 */
	pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i);

	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	bp->b_npages = j - i;

	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	VM_OBJECT_LOCK(mreq->object);
	vm_object_pip_add(mreq->object, bp->b_npages);
	VM_OBJECT_UNLOCK(mreq->object);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
	 */
	BUF_KERNPROC(bp);
	swp_pager_strategy(bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */
	s = splvm();
	vm_page_lock_queues();
	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (msleep(mreq, &vm_page_queue_mtx, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
		}
	}
	vm_page_unlock_queues();
	splx(s);

	VM_OBJECT_LOCK(mreq->object);
	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return (VM_PAGER_ERROR);
	} else {
		return (VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.   This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	GIANT_REQUIRED;
	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	VM_OBJECT_UNLOCK(object);

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	mtx_lock(&pbuf_mtx);
	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
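		 *
		 * For example, raising vm.swap_async_max from 4 to 8 adds 4
		 * to both nsw_wcount_async_max and nsw_wcount_async (if that
		 * keeps the latter non-negative) and wakes up any waiters.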
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}
	mtx_unlock(&pbuf_mtx);

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		s = splvm();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			splx(s);
			continue;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_flags |= B_PAGING;
		bp->b_iocmd = BIO_WRITE;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = crhold(thread0.td_ucred);
		bp->b_wcred = crhold(thread0.td_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		VM_OBJECT_LOCK(object);
		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_lock_queues();
			vm_page_flag_set(mreq, PG_SWAPINPROG);
			vm_page_unlock_queues();
			bp->b_pages[j] = mreq;
		}
		VM_OBJECT_UNLOCK(object);
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;

		splx(s);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swp_pager_strategy(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			/* restart outer loop */
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		bp->b_iodone = bdone;
		swp_pager_strategy(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		s = splbio();
		bwait(bp, PVM, "swwrt");
		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;
		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
		splx(s);
	}
	VM_OBJECT_LOCK(object);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */
static void
swp_pager_async_iodone(struct buf *bp)
{
	int s;
	int i;
	vm_object_t object = NULL;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */
	if (bp->b_ioflags & BIO_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */
	s = splvm();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	if (bp->b_npages) {
		object = bp->b_pages[0]->object;
		VM_OBJECT_LOCK(object);
	}
	vm_page_lock_queues();
	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_ioflags & BIO_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bp->b_iocmd == BIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
				 * AS THIS MESSES WITH object->memq, and it is
				 * not legal to mess with object->memq from an
				 * interrupt.
				 */
				m->valid = 0;
				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */
			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure not to unbusy the page specifically
			 * requested by getpages - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (vm_page_count_severe())
				vm_page_try_to_cache(m);
		}
	}
	vm_page_unlock_queues();

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object != NULL) {
		vm_object_pip_wakeupn(object, bp->b_npages);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * release the physical I/O buffer
	 */
	relpbuf(
	    bp,
	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/*
 * swap_pager_isswapped:
 *
 *	Return 1 if at least one page in the given object is paged
 *	out to the given swap device.
 *
 *	This routine may not block.
 */
int
swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
{
	daddr_t index = 0;
	int bcount;
	int i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_SWAP)
		return (0);

	for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
		struct swblock *swap;

		mtx_lock(&swhash_mtx);
		if ((swap = *swp_pager_hash(object, index)) != NULL) {
			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v == SWAPBLK_NONE)
					continue;
				if (swp_pager_find_dev(v) == sp) {
					mtx_unlock(&swhash_mtx);
					return 1;
				}
			}
		}
		mtx_unlock(&swhash_mtx);
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swap_pager_isswapped: failed to locate all swap meta blocks");
	}
	return 0;
}

/*
 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
 *
 *	This routine dissociates the page at the given index within a
 *	swap block from its backing store, paging it in if necessary.
 *	If the page is paged in, it is placed in the inactive queue,
 *	since it had its backing store ripped out from under it.
 *	We also attempt to swap in all other pages in the swap block,
 *	but we only guarantee that the one at the specified index is
 *	paged in.
 *
 *	XXX - The code to page the whole block in doesn't work, so we
 *	      revert to the one-by-one behavior for now.  Sigh.
 */
static __inline void
swp_pager_force_pagein(struct swblock *swap, int idx)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t pindex;

	object = swap->swb_object;
	pindex = swap->swb_index;
	mtx_unlock(&swhash_mtx);

	VM_OBJECT_LOCK(object);
	vm_object_pip_add(object, 1);
	m = vm_page_grab(object, pindex + idx, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
	if (m->valid == VM_PAGE_BITS_ALL) {
		vm_object_pip_subtract(object, 1);
		vm_page_lock_queues();
		vm_page_activate(m);
		vm_page_dirty(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
		vm_pager_page_unswapped(m);
		VM_OBJECT_UNLOCK(object);
		return;
	}

	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
	vm_object_pip_subtract(object, 1);
	vm_page_lock_queues();
	vm_page_dirty(m);
	vm_page_dontneed(m);
	vm_page_wakeup(m);
	vm_page_unlock_queues();
	vm_pager_page_unswapped(m);
	VM_OBJECT_UNLOCK(object);
}


/*
 * swap_pager_swapoff:
 *
 *	Page in all of the pages that have been paged out to the
 *	given device.  The corresponding blocks in the bitmap must be
 *	marked as allocated and the device must be flagged SW_CLOSING.
 *	There may be no processes swapped out to the device.
 *
 *	The sw_used parameter points to the field in the swdev structure
 *	that contains a count of the number of blocks still allocated
 *	on the device.  If we encounter objects with a nonzero pip count
 *	in our scan, we use this number to determine if we're really done.
 *
 *	This routine may block.
 */
static void
swap_pager_swapoff(struct swdevt *sp, int *sw_used)
{
	struct swblock **pswap;
	struct swblock *swap;
	vm_object_t waitobj;
	daddr_t v;
	int i, j;

	GIANT_REQUIRED;

full_rescan:
	waitobj = NULL;
	for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
restart:
		pswap = &swhash[i];
		mtx_lock(&swhash_mtx);
		while ((swap = *pswap) != NULL) {
			for (j = 0; j < SWAP_META_PAGES; ++j) {
				v = swap->swb_pages[j];
				if (v != SWAPBLK_NONE &&
				    swp_pager_find_dev(v) == sp)
					break;
			}
			if (j < SWAP_META_PAGES) {
				swp_pager_force_pagein(swap, j);
				goto restart;
			} else if (swap->swb_object->paging_in_progress) {
				if (!waitobj)
					waitobj = swap->swb_object;
			}
			pswap = &swap->swb_hnext;
		}
		mtx_unlock(&swhash_mtx);
	}
	if (waitobj && *sw_used) {
		/*
		 * We wait on an arbitrary object to clock our rescans
		 * to the rate of paging completion.
		 */
		VM_OBJECT_LOCK(waitobj);
		vm_object_pip_wait(waitobj, "swpoff");
		VM_OBJECT_UNLOCK(waitobj);
		goto full_rescan;
	}
	if (*sw_used)
		panic("swapoff: failed to locate %d swap blocks", *sw_used);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
{
	struct swblock *swap;
	struct swblock **pswap;
	int idx;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * Convert default object to swap object if necessary
	 */
	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
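	 *
	 * (The swblock is allocated with M_NOWAIT below; if that fails we
	 * drop the locks, VM_WAIT for free pages, and retry the lookup.)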
	 */
retry:
	mtx_lock(&swhash_mtx);
	pswap = swp_pager_hash(object, pindex);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			goto done;

		swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
		if (swap == NULL) {
			mtx_unlock(&swhash_mtx);
			VM_OBJECT_UNLOCK(object);
			VM_WAIT;
			VM_OBJECT_LOCK(object);
			goto retry;
		}

		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */
	idx = pindex & SWAP_META_MASK;

	if (swap->swb_pages[idx] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[idx], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[idx] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
done:
	mtx_unlock(&swhash_mtx);
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		mtx_lock(&swhash_mtx);
		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					uma_zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
		mtx_unlock(&swhash_mtx);
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
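 *	It walks the object's page index space in SWAP_META_PAGES-sized
 *	steps until the object's swp_bcount tracking counter reaches zero.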
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		mtx_lock(&swhash_mtx);
		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			uma_zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		mtx_unlock(&swhash_mtx);
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free it; pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
{
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;
	int idx;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP)
		return (SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	mtx_lock(&swhash_mtx);
	pswap = swp_pager_hash(object, pindex);

	if ((swap = *pswap) != NULL) {
		idx = pindex & SWAP_META_MASK;
		r1 = swap->swb_pages[idx];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[idx] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					uma_zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	mtx_unlock(&swhash_mtx);
	return (r1);
}

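/*
 * Illustrative caller sketch (not part of swap_pager.c): the three ways
 * swp_pager_meta_ctl() above is typically used.  This fragment assumes a
 * hypothetical object/pindex pair with the object lock already held; it
 * is not compiled.
 */
#if 0
	daddr_t blk;

	/* Plain lookup: flags == 0 leaves the metadata untouched. */
	blk = swp_pager_meta_ctl(object, pindex, 0);

	/* Pop: return the block and remove it from the metadata. */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);

	/* Free: release the block to the bitmap; SWAPBLK_NONE is returned. */
	(void)swp_pager_meta_ctl(object, pindex, SWM_FREE);
#endif
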
/*
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapon_args {
	char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
swapon(struct thread *td, struct swapon_args *uap)
{
	struct vattr attr;
	struct vnode *vp;
	struct nameidata nd;
	int error;

	mtx_lock(&Giant);
	error = suser(td);
	if (error)
		goto done2;

	while (swdev_syscall_active)
		tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
	swdev_syscall_active = 1;

	/*
	 * Swap metadata may not fit in the KVM if we have physical
	 * memory of >1GB.
	 */
	if (swap_zone == NULL) {
		error = ENOMEM;
		goto done;
	}

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
	error = namei(&nd);
	if (error)
		goto done;

	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	if (vn_isdisk(vp, &error)) {
		error = swapongeom(td, vp);
	} else if (vp->v_type == VREG &&
	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
	    (error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}

	if (error)
		vrele(vp);
done:
	swdev_syscall_active = 0;
	wakeup_one(&swdev_syscall_active);
done2:
	mtx_unlock(&Giant);
	return (error);
}

static void
swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strategy, sw_close_t *close, dev_t dev)
{
	struct swdevt *sp, *tsp;
	swblk_t dvbase;
	u_long mblocks;

	/*
	 * If we go beyond this, we get overflows in the radix
	 * tree bitmap code.
	 */
	mblocks = 0x40000000 / BLIST_META_RADIX;
	if (nblks > mblocks) {
		printf("WARNING: reducing size to maximum of %lu blocks per swap unit\n",
		    mblocks);
		nblks = mblocks;
	}
	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First round nblks down to a page-aligned value, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
	nblks &= ~(ctodb(1) - 1);
	nblks = dbtoc(nblks);

	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
	sp->sw_vp = vp;
	sp->sw_id = id;
	sp->sw_dev = dev;
	sp->sw_flags = 0;
	sp->sw_nblks = nblks;
	sp->sw_used = 0;
	sp->sw_strategy = strategy;
	sp->sw_close = close;

	sp->sw_blist = blist_create(nblks);
	/*
	 * Do not free the first two blocks in order to avoid overwriting
	 * any BSD label at the front of the partition
	 */
	blist_free(sp->sw_blist, 2, nblks - 2);

	dvbase = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
		if (tsp->sw_end >= dvbase) {
			/*
			 * We put one uncovered page between the devices
			 * in order to definitively prevent any cross-device
			 * I/O requests
			 */
			dvbase = tsp->sw_end + 1;
		}
	}
	sp->sw_first = dvbase;
	sp->sw_end = dvbase + nblks;
	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
	nswapdev++;
	swap_pager_avail += nblks;
	swp_sizecheck();
	mtx_unlock(&sw_dev_mtx);
}

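/*
 * Illustrative sketch (not part of swap_pager.c): the unit conversion
 * performed by swaponsomething() above.  A device size reported in
 * DEV_BSIZE (512-byte) blocks is rounded down to a whole number of pages
 * and then expressed in pages.  PAGE_SIZE == 4096 and DEV_BSIZE == 512
 * are assumed here purely for the arithmetic; the EX_* macros stand in
 * for the real ctodb()/dbtoc() conversions.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE	4096
#define EX_DEV_BSIZE	512
#define EX_CTODB(x)	((x) * (EX_PAGE_SIZE / EX_DEV_BSIZE))	/* pages -> disk blocks */
#define EX_DBTOC(x)	((x) / (EX_PAGE_SIZE / EX_DEV_BSIZE))	/* disk blocks -> pages */

int
main(void)
{
	unsigned long nblks = 204805;		/* 512-byte blocks, not page aligned */

	nblks &= ~(EX_CTODB(1) - 1);		/* 204805 -> 204800 */
	nblks = EX_DBTOC(nblks);		/* 204800 -> 25600 pages */
	printf("%lu pages of swap (pages 0 and 1 stay reserved)\n", nblks);
	return (0);
}
#endif
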
/*
 * SYSCALL: swapoff(devname)
 *
 * Disable swapping on the given device.
 *
 * XXX: Badly designed system call: it should use a device index
 * rather than filename as specification.  We keep sw_vp around
 * only to make this work.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapoff_args {
	char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
swapoff(struct thread *td, struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nameidata nd;
	struct swdevt *sp;
	u_long nblks, dvbase;
	int error;

	mtx_lock(&Giant);

	error = suser(td);
	if (error)
		goto done2;

	while (swdev_syscall_active)
		tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
	swdev_syscall_active = 1;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
	error = namei(&nd);
	if (error)
		goto done;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_vp == vp)
			goto found;
	}
	mtx_unlock(&sw_dev_mtx);
	error = EINVAL;
	goto done;
found:
	mtx_unlock(&sw_dev_mtx);
#ifdef MAC
	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_system_swapoff(td->td_ucred, vp);
	(void) VOP_UNLOCK(vp, 0, td);
	if (error != 0)
		goto done;
#endif

	nblks = sp->sw_nblks;

	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
	    nblks + nswap_lowat) {
		error = ENOMEM;
		goto done;
	}

	/*
	 * Prevent further allocations on this device.
	 */
	mtx_lock(&sw_dev_mtx);
	sp->sw_flags |= SW_CLOSING;
	for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
		swap_pager_avail -= blist_fill(sp->sw_blist,
		    dvbase, dmmax);
	}
	mtx_unlock(&sw_dev_mtx);

	/*
	 * Page in the contents of the device and close it.
	 */
#ifndef NO_SWAPPING
	vm_proc_swapin_all(sp);
#endif /* !NO_SWAPPING */
	swap_pager_swapoff(sp, &sp->sw_used);

	sp->sw_close(td, sp);
	sp->sw_id = NULL;
	mtx_lock(&sw_dev_mtx);
	TAILQ_REMOVE(&swtailq, sp, sw_list);
	nswapdev--;
	if (nswapdev == 0) {
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	if (swdevhd == sp)
		swdevhd = NULL;
	mtx_unlock(&sw_dev_mtx);
	blist_destroy(sp->sw_blist);
	free(sp, M_VMPGDATA);

done:
	swdev_syscall_active = 0;
	wakeup_one(&swdev_syscall_active);
done2:
	mtx_unlock(&Giant);
	return (error);
}

void
swap_pager_status(int *total, int *used)
{
	struct swdevt *sp;

	*total = 0;
	*used = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		*total += sp->sw_nblks;
		*used += sp->sw_used;
	}
	mtx_unlock(&sw_dev_mtx);
}

static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int error, n;
	struct xswdev xs;
	struct swdevt *sp;

	if (arg2 != 1) /* name length */
		return (EINVAL);

	n = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (n == *name) {
			mtx_unlock(&sw_dev_mtx);
			xs.xsw_version = XSWDEV_VERSION;
			xs.xsw_dev = sp->sw_dev;
			xs.xsw_flags = sp->sw_flags;
			xs.xsw_nblks = sp->sw_nblks;
			xs.xsw_used = sp->sw_used;

			error = SYSCTL_OUT(req, &xs, sizeof(xs));
			return (error);
		}
		n++;
	}
	mtx_unlock(&sw_dev_mtx);
	return (ENOENT);
}

SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
    "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
    "Swap statistics by device");

/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *			  vmspace.
 *
 *	The map must be locked.
 *
 *	Swap usage is determined by taking the proportional swap used by
 *	VM objects backing the VM map.  To make up for fractional losses,
 *	if the VM object has any swap use at all the associated map entries
 *	count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	int count = 0;

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		vm_object_t object;

		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
		    (object = cur->object.vm_object) != NULL) {
			VM_OBJECT_LOCK(object);
			if (object->type == OBJT_SWAP &&
			    object->un_pager.swp.swp_bcount != 0) {
				int n = (cur->end - cur->start) / PAGE_SIZE;

				count += object->un_pager.swp.swp_bcount *
				    SWAP_META_PAGES * n / object->size + 1;
			}
			VM_OBJECT_UNLOCK(object);
		}
	}
	return (count);
}

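/*
 * Illustrative sketch (not part of swap_pager.c): the proportional
 * estimate computed by vmspace_swap_count() above.  swp_bcount counts
 * swblock buckets, each of which can describe up to SWAP_META_PAGES
 * swapped pages, so the product is scaled by the fraction of the object
 * that a map entry covers and rounded up by one page.  The concrete
 * numbers below are assumptions chosen only to show the arithmetic.
 */
#if 0
#include <stdio.h>

#define EX_SWAP_META_PAGES	32		/* assumed value */

int
main(void)
{
	int swp_bcount = 4;			/* swblock buckets on the object */
	int n = 256;				/* pages covered by the map entry */
	int objsize = 1024;			/* object size in pages */
	int count;

	/* 4 * 32 * 256 / 1024 + 1 == 33 pages charged to this entry. */
	count = swp_bcount * EX_SWAP_META_PAGES * n / objsize + 1;
	printf("estimated swap use: %d pages\n", count);
	return (0);
}
#endif
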
/*
 * GEOM backend
 *
 * Swapping onto disk devices.
 *
 */

static g_orphan_t swapgeom_orphan;

static struct g_class g_swap_class = {
	.name = "SWAP",
	.version = G_VERSION,
	.orphan = swapgeom_orphan,
};

DECLARE_GEOM_CLASS(g_swap_class, g_class);


static void
swapgeom_done(struct bio *bp2)
{
	struct buf *bp;

	bp = bp2->bio_caller2;
	bp->b_ioflags = bp2->bio_flags;
	if (bp2->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	bp->b_resid = bp->b_bcount - bp2->bio_completed;
	bp->b_error = bp2->bio_error;
	bufdone(bp);
	g_destroy_bio(bp2);
}

static void
swapgeom_strategy(struct buf *bp, struct swdevt *sp)
{
	struct bio *bio;
	struct g_consumer *cp;

	cp = sp->sw_id;
	if (cp == NULL) {
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
	bio = g_new_bio();
	if (bio == NULL) {
		/*
		 * XXX: This is better than panicking, but not much better.
		 * XXX: Somehow this should be retried.  A more generic
		 * XXX: implementation of ENOMEM in geom may be able to cope.
		 */
		bp->b_error = ENOMEM;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
	bio->bio_caller2 = bp;
	bio->bio_cmd = bp->b_iocmd;
	bio->bio_data = bp->b_data;
	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
	bio->bio_length = bp->b_bcount;
	bio->bio_done = swapgeom_done;
	g_io_request(bio, cp);
	return;
}

static void
swapgeom_orphan(struct g_consumer *cp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list)
		if (sp->sw_id == cp)
			sp->sw_id = NULL;
	mtx_unlock(&sw_dev_mtx);
}

static void
swapgeom_close_ev(void *arg, int flags)
{
	struct g_consumer *cp;

	cp = arg;
	g_access(cp, -1, -1, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
swapgeom_close(struct thread *td, struct swdevt *sw)
{

	/* XXX: direct call when Giant untangled */
	g_waitfor_event(swapgeom_close_ev, sw->sw_id, M_WAITOK, NULL);
}

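/*
 * Illustrative sketch (not part of swap_pager.c): the offset translation
 * performed by swapgeom_strategy() above.  Swap block numbers live in a
 * single global space; subtracting the device's sw_first rebases the
 * block onto this provider, and multiplying by the page size yields the
 * byte offset handed to GEOM.  The numbers below are assumptions chosen
 * only to show the arithmetic.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE	4096			/* assumed value */

int
main(void)
{
	long sw_first = 25601;		/* device's base in the global swap space */
	long b_blkno = 25701;		/* swap block being read or written */
	long long bio_offset;

	/* (25701 - 25601) * 4096 == 409600 bytes into the provider. */
	bio_offset = (long long)(b_blkno - sw_first) * EX_PAGE_SIZE;
	printf("provider offset: %lld\n", bio_offset);
	return (0);
}
#endif
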
struct swh0h0 {
	struct cdev *dev;
	struct vnode *vp;
	int	error;
};

static void
swapongeom_ev(void *arg, int flags)
{
	struct swh0h0 *swh;
	struct g_provider *pp;
	struct g_consumer *cp;
	static struct g_geom *gp;
	struct swdevt *sp;
	u_long nblks;
	int error;

	swh = arg;
	swh->error = 0;
	pp = g_dev_getprovider(swh->dev);
	if (pp == NULL) {
		swh->error = ENODEV;
		return;
	}
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		cp = sp->sw_id;
		if (cp != NULL && cp->provider == pp) {
			mtx_unlock(&sw_dev_mtx);
			swh->error = EBUSY;
			return;
		}
	}
	mtx_unlock(&sw_dev_mtx);
	if (gp == NULL)
		gp = g_new_geomf(&g_swap_class, "swap", NULL);
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	/*
	 * XXX: Every time you think you can improve the margin for
	 * footshooting, somebody depends on the ability to do so:
	 * savecore(8) wants to write to our swapdev so we cannot
	 * set an exclusive count :-(
	 */
	error = g_access(cp, 1, 1, 0);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		swh->error = error;
		return;
	}
	nblks = pp->mediasize / DEV_BSIZE;
	swaponsomething(swh->vp, cp, nblks, swapgeom_strategy,
	    swapgeom_close, dev2udev(swh->dev));
	swh->error = 0;
	return;
}

static int
swapongeom(struct thread *td, struct vnode *vp)
{
	int error;
	struct swh0h0 swh;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	swh.dev = vp->v_rdev;
	swh.vp = vp;
	swh.error = 0;
	/* XXX: direct call when Giant untangled */
	error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL);
	if (!error)
		error = swh.error;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * VNODE backend
 *
 * This is used mainly for network filesystem (read: probably only tested
 * with NFS) swapfiles.
 *
 */

static void
swapdev_strategy(struct buf *bp, struct swdevt *sp)
{
	int s;
	struct vnode *vp2;

	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);

	vp2 = sp->sw_id;
	vhold(vp2);
	s = splvm();
	if (bp->b_iocmd == BIO_WRITE) {
		if (bp->b_bufobj)	/* XXX: should always be true /phk */
			bufobj_wdrop(bp->b_bufobj);
		bufobj_wref(&vp2->v_bufobj);
	}
	bp->b_vp = vp2;
	splx(s);
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);
	return;
}

static void
swapdev_close(struct thread *td, struct swdevt *sp)
{

	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
	vrele(sp->sw_vp);
}


static int
swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
{
	struct swdevt *sp;
	int error;

	if (nblks == 0)
		return (ENXIO);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_id == vp) {
			mtx_unlock(&sw_dev_mtx);
			return (EBUSY);
		}
	}
	mtx_unlock(&sw_dev_mtx);

	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
#ifdef MAC
	error = mac_check_system_swapon(td->td_ucred, vp);
	if (error == 0)
#endif
		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, -1);
	(void) VOP_UNLOCK(vp, 0, td);
	if (error)
		return (error);

	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
	    NODEV);
	return (0);
}
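
/*
 * Illustrative userland sketch (not part of swap_pager.c): how the
 * swapon(2) and swapoff(2) system calls implemented above are typically
 * invoked.  The declarations are assumed to come from <unistd.h> as on
 * FreeBSD, and /dev/ada0p3 is a made-up device path.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *dev = "/dev/ada0p3";	/* hypothetical swap partition */

	if (swapon(dev) != 0) {			/* requires root privileges */
		perror("swapon");
		return (1);
	}
	printf("swapping enabled on %s\n", dev);

	if (swapoff(dev) != 0) {		/* pages everything back in first */
		perror("swapoff");
		return (1);
	}
	printf("swapping disabled on %s\n", dev);
	return (0);
}
#endif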