1 /*- 2 * Copyright (c) 1998 Matthew Dillon, 3 * Copyright (c) 1994 John S. Dyson 4 * Copyright (c) 1990 University of Utah. 5 * Copyright (c) 1982, 1986, 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * New Swap System 41 * Matthew Dillon 42 * 43 * Radix Bitmap 'blists'. 44 * 45 * - The new swapper uses the new radix bitmap code. This should scale 46 * to arbitrarily small or arbitrarily large swap spaces and an almost 47 * arbitrary degree of fragmentation. 48 * 49 * Features: 50 * 51 * - on the fly reallocation of swap during putpages. The new system 52 * does not try to keep previously allocated swap blocks for dirty 53 * pages. 54 * 55 * - on the fly deallocation of swap 56 * 57 * - No more garbage collection required. Unnecessarily allocated swap 58 * blocks only exist for dirty vm_page_t's now and these are already 59 * cycled (in a high-load system) by the pager. We also do on-the-fly 60 * removal of invalidated swap blocks when a page is destroyed 61 * or renamed. 62 * 63 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 64 * 65 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 66 * @(#)vm_swap.c 8.5 (Berkeley) 2/17/94 67 */ 68 69 #include <sys/cdefs.h> 70 __FBSDID("$FreeBSD$"); 71 72 #include "opt_mac.h" 73 #include "opt_swap.h" 74 #include "opt_vm.h" 75 76 #include <sys/param.h> 77 #include <sys/systm.h> 78 #include <sys/conf.h> 79 #include <sys/kernel.h> 80 #include <sys/proc.h> 81 #include <sys/bio.h> 82 #include <sys/buf.h> 83 #include <sys/disk.h> 84 #include <sys/fcntl.h> 85 #include <sys/mount.h> 86 #include <sys/namei.h> 87 #include <sys/vnode.h> 88 #include <sys/mac.h> 89 #include <sys/malloc.h> 90 #include <sys/sysctl.h> 91 #include <sys/sysproto.h> 92 #include <sys/blist.h> 93 #include <sys/lock.h> 94 #include <sys/sx.h> 95 #include <sys/vmmeter.h> 96 97 #include <vm/vm.h> 98 #include <vm/pmap.h> 99 #include <vm/vm_map.h> 100 #include <vm/vm_kern.h> 101 #include <vm/vm_object.h> 102 #include <vm/vm_page.h> 103 #include <vm/vm_pager.h> 104 #include <vm/vm_pageout.h> 105 #include <vm/vm_param.h> 106 #include <vm/swap_pager.h> 107 #include <vm/vm_extern.h> 108 #include <vm/uma.h> 109 110 #include <geom/geom.h> 111 112 /* 113 * SWB_NPAGES must be a power of 2. It may be set to 1, 2, 4, 8, or 16 114 * pages per allocation. We recommend you stick with the default of 8. 115 * The 16-page limit is due to the radix code (kern/subr_blist.c). 116 */ 117 #ifndef MAX_PAGEOUT_CLUSTER 118 #define MAX_PAGEOUT_CLUSTER 16 119 #endif 120 121 #if !defined(SWB_NPAGES) 122 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER 123 #endif 124 125 /* 126 * Piecemeal swap metadata structure. Swap is stored in a radix tree. 127 * 128 * If SWB_NPAGES is 8 and sizeof(char *) == sizeof(daddr_t), our radix 129 * is basically 8. Assuming PAGE_SIZE == 4096, one tree level represents 130 * 32K worth of data, two levels represent 256K, three levels represent 131 * 2 MBytes. This is acceptable. 132 * 133 * Overall memory utilization is about the same as the old swap structure. 134 */ 135 #define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t)) 136 #define SWAP_META_PAGES (SWB_NPAGES * 2) 137 #define SWAP_META_MASK (SWAP_META_PAGES - 1) 138 139 typedef int32_t swblk_t; /* 140 * swap offset. This is the type used to 141 * address the "virtual swap device" and 142 * therefore the maximum swap space is 143 * 2^32 pages. 144 */ 145 146 struct swdevt; 147 typedef void sw_strategy_t(struct buf *bp, struct swdevt *sw); 148 typedef void sw_close_t(struct thread *td, struct swdevt *sw); 149 150 /* 151 * Swap device table 152 */ 153 struct swdevt { 154 int sw_flags; 155 int sw_nblks; 156 int sw_used; 157 dev_t sw_dev; 158 struct vnode *sw_vp; 159 void *sw_id; 160 swblk_t sw_first; 161 swblk_t sw_end; 162 struct blist *sw_blist; 163 TAILQ_ENTRY(swdevt) sw_list; 164 sw_strategy_t *sw_strategy; 165 sw_close_t *sw_close; 166 }; 167 168 #define SW_CLOSING 0x04 169 170 struct swblock { 171 struct swblock *swb_hnext; 172 vm_object_t swb_object; 173 vm_pindex_t swb_index; 174 int swb_count; 175 daddr_t swb_pages[SWAP_META_PAGES]; 176 }; 177 178 static struct mtx sw_dev_mtx; 179 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq); 180 static struct swdevt *swdevhd; /* Allocate from here next */ 181 static int nswapdev; /* Number of swap devices */ 182 int swap_pager_avail; 183 static int swdev_syscall_active = 0; /* serialize swap(on|off) */ 184 185 static void swapdev_strategy(struct buf *, struct swdevt *sw); 186 187 #define SWM_FREE 0x02 /* free, period */ 188 #define SWM_POP 0x04 /* pop out */ 189 190 int swap_pager_full = 2; /* swap space exhaustion (task killing) */ 191 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/ 192 static int nsw_rcount; /* free read buffers */ 193 static int nsw_wcount_sync; /* limit write buffers / synchronous */ 194 static int nsw_wcount_async; /* limit write buffers / asynchronous */ 195 static int nsw_wcount_async_max;/* assigned maximum */ 196 static int nsw_cluster_max; /* maximum VOP I/O allowed */ 197 198 static struct swblock **swhash; 199 static int swhash_mask; 200 static struct mtx swhash_mtx; 201 202 static int swap_async_max = 4; /* maximum in-progress async I/O's */ 203 static struct sx sw_alloc_sx; 204 205 206 SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 207 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 208 209 /* 210 * "named" and "unnamed" anon region objects. Try to reduce the overhead 211 * of searching a named list by hashing it just a little. 212 */ 213 214 #define NOBJLISTS 8 215 216 #define NOBJLIST(handle) \ 217 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)]) 218 219 static struct mtx sw_alloc_mtx; /* protect list manipulation */ 220 static struct pagerlst swap_pager_object_list[NOBJLISTS]; 221 static uma_zone_t swap_zone; 222 static struct vm_object swap_zone_obj; 223 224 /* 225 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 226 * calls hooked from other parts of the VM system and do not appear here. 227 * (see vm/swap_pager.h). 228 */ 229 static vm_object_t 230 swap_pager_alloc(void *handle, vm_ooffset_t size, 231 vm_prot_t prot, vm_ooffset_t offset); 232 static void swap_pager_dealloc(vm_object_t object); 233 static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int); 234 static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *); 235 static boolean_t 236 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after); 237 static void swap_pager_init(void); 238 static void swap_pager_unswapped(vm_page_t); 239 static void swap_pager_swapoff(struct swdevt *sp); 240 241 struct pagerops swappagerops = { 242 .pgo_init = swap_pager_init, /* early system initialization of pager */ 243 .pgo_alloc = swap_pager_alloc, /* allocate an OBJT_SWAP object */ 244 .pgo_dealloc = swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 245 .pgo_getpages = swap_pager_getpages, /* pagein */ 246 .pgo_putpages = swap_pager_putpages, /* pageout */ 247 .pgo_haspage = swap_pager_haspage, /* get backing store status for page */ 248 .pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */ 249 }; 250 251 /* 252 * dmmax is in page-sized chunks with the new swap system. It was 253 * dev-bsized chunks in the old. dmmax is always a power of 2. 254 * 255 * swap_*() routines are externally accessible. swp_*() routines are 256 * internal. 257 */ 258 static int dmmax; 259 static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 260 static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 261 262 SYSCTL_INT(_vm, OID_AUTO, dmmax, 263 CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block"); 264 265 static void swp_sizecheck(void); 266 static void swp_pager_async_iodone(struct buf *bp); 267 static int swapongeom(struct thread *, struct vnode *); 268 static int swaponvp(struct thread *, struct vnode *, u_long); 269 static int swapoff_one(struct swdevt *sp, struct thread *td); 270 271 /* 272 * Swap bitmap functions 273 */ 274 static void swp_pager_freeswapspace(daddr_t blk, int npages); 275 static daddr_t swp_pager_getswapspace(int npages); 276 277 /* 278 * Metadata functions 279 */ 280 static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index); 281 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t); 282 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t); 283 static void swp_pager_meta_free_all(vm_object_t); 284 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int); 285 286 /* 287 * SWP_SIZECHECK() - update swap_pager_full indication 288 * 289 * update the swap_pager_almost_full indication and warn when we are 290 * about to run out of swap space, using lowat/hiwat hysteresis. 291 * 292 * Clear swap_pager_full ( task killing ) indication when lowat is met. 293 * 294 * No restrictions on call 295 * This routine may not block. 296 * This routine must be called at splvm() 297 */ 298 static void 299 swp_sizecheck(void) 300 { 301 302 if (swap_pager_avail < nswap_lowat) { 303 if (swap_pager_almost_full == 0) { 304 printf("swap_pager: out of swap space\n"); 305 swap_pager_almost_full = 1; 306 } 307 } else { 308 swap_pager_full = 0; 309 if (swap_pager_avail > nswap_hiwat) 310 swap_pager_almost_full = 0; 311 } 312 } 313 314 /* 315 * SWP_PAGER_HASH() - hash swap meta data 316 * 317 * This is an helper function which hashes the swapblk given 318 * the object and page index. It returns a pointer to a pointer 319 * to the object, or a pointer to a NULL pointer if it could not 320 * find a swapblk. 321 * 322 * This routine must be called at splvm(). 323 */ 324 static struct swblock ** 325 swp_pager_hash(vm_object_t object, vm_pindex_t index) 326 { 327 struct swblock **pswap; 328 struct swblock *swap; 329 330 index &= ~(vm_pindex_t)SWAP_META_MASK; 331 pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; 332 while ((swap = *pswap) != NULL) { 333 if (swap->swb_object == object && 334 swap->swb_index == index 335 ) { 336 break; 337 } 338 pswap = &swap->swb_hnext; 339 } 340 return (pswap); 341 } 342 343 /* 344 * SWAP_PAGER_INIT() - initialize the swap pager! 345 * 346 * Expected to be started from system init. NOTE: This code is run 347 * before much else so be careful what you depend on. Most of the VM 348 * system has yet to be initialized at this point. 349 */ 350 static void 351 swap_pager_init(void) 352 { 353 /* 354 * Initialize object lists 355 */ 356 int i; 357 358 for (i = 0; i < NOBJLISTS; ++i) 359 TAILQ_INIT(&swap_pager_object_list[i]); 360 mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF); 361 mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF); 362 363 /* 364 * Device Stripe, in PAGE_SIZE'd blocks 365 */ 366 dmmax = SWB_NPAGES * 2; 367 } 368 369 /* 370 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 371 * 372 * Expected to be started from pageout process once, prior to entering 373 * its main loop. 374 */ 375 void 376 swap_pager_swap_init(void) 377 { 378 int n, n2; 379 380 /* 381 * Number of in-transit swap bp operations. Don't 382 * exhaust the pbufs completely. Make sure we 383 * initialize workable values (0 will work for hysteresis 384 * but it isn't very efficient). 385 * 386 * The nsw_cluster_max is constrained by the bp->b_pages[] 387 * array (MAXPHYS/PAGE_SIZE) and our locally defined 388 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 389 * constrained by the swap device interleave stripe size. 390 * 391 * Currently we hardwire nsw_wcount_async to 4. This limit is 392 * designed to prevent other I/O from having high latencies due to 393 * our pageout I/O. The value 4 works well for one or two active swap 394 * devices but is probably a little low if you have more. Even so, 395 * a higher value would probably generate only a limited improvement 396 * with three or four active swap devices since the system does not 397 * typically have to pageout at extreme bandwidths. We will want 398 * at least 2 per swap devices, and 4 is a pretty good value if you 399 * have one NFS swap device due to the command/ack latency over NFS. 400 * So it all works out pretty well. 401 */ 402 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 403 404 mtx_lock(&pbuf_mtx); 405 nsw_rcount = (nswbuf + 1) / 2; 406 nsw_wcount_sync = (nswbuf + 3) / 4; 407 nsw_wcount_async = 4; 408 nsw_wcount_async_max = nsw_wcount_async; 409 mtx_unlock(&pbuf_mtx); 410 411 /* 412 * Initialize our zone. Right now I'm just guessing on the number 413 * we need based on the number of pages in the system. Each swblock 414 * can hold 16 pages, so this is probably overkill. This reservation 415 * is typically limited to around 32MB by default. 416 */ 417 n = cnt.v_page_count / 2; 418 if (maxswzone && n > maxswzone / sizeof(struct swblock)) 419 n = maxswzone / sizeof(struct swblock); 420 n2 = n; 421 swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL, 422 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM); 423 if (swap_zone == NULL) 424 panic("failed to create swap_zone."); 425 do { 426 if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n)) 427 break; 428 /* 429 * if the allocation failed, try a zone two thirds the 430 * size of the previous attempt. 431 */ 432 n -= ((n + 2) / 3); 433 } while (n > 0); 434 if (n2 != n) 435 printf("Swap zone entries reduced from %d to %d.\n", n2, n); 436 n2 = n; 437 438 /* 439 * Initialize our meta-data hash table. The swapper does not need to 440 * be quite as efficient as the VM system, so we do not use an 441 * oversized hash table. 442 * 443 * n: size of hash table, must be power of 2 444 * swhash_mask: hash table index mask 445 */ 446 for (n = 1; n < n2 / 8; n *= 2) 447 ; 448 swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO); 449 swhash_mask = n - 1; 450 mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF); 451 } 452 453 /* 454 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 455 * its metadata structures. 456 * 457 * This routine is called from the mmap and fork code to create a new 458 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 459 * and then converting it with swp_pager_meta_build(). 460 * 461 * This routine may block in vm_object_allocate() and create a named 462 * object lookup race, so we must interlock. We must also run at 463 * splvm() for the object lookup to handle races with interrupts, but 464 * we do not have to maintain splvm() in between the lookup and the 465 * add because (I believe) it is not possible to attempt to create 466 * a new swap object w/handle when a default object with that handle 467 * already exists. 468 * 469 * MPSAFE 470 */ 471 static vm_object_t 472 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, 473 vm_ooffset_t offset) 474 { 475 vm_object_t object; 476 vm_pindex_t pindex; 477 478 pindex = OFF_TO_IDX(offset + PAGE_MASK + size); 479 480 if (handle) { 481 mtx_lock(&Giant); 482 /* 483 * Reference existing named region or allocate new one. There 484 * should not be a race here against swp_pager_meta_build() 485 * as called from vm_page_remove() in regards to the lookup 486 * of the handle. 487 */ 488 sx_xlock(&sw_alloc_sx); 489 object = vm_pager_object_lookup(NOBJLIST(handle), handle); 490 491 if (object != NULL) { 492 vm_object_reference(object); 493 } else { 494 object = vm_object_allocate(OBJT_DEFAULT, pindex); 495 object->handle = handle; 496 497 VM_OBJECT_LOCK(object); 498 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 499 VM_OBJECT_UNLOCK(object); 500 } 501 sx_xunlock(&sw_alloc_sx); 502 mtx_unlock(&Giant); 503 } else { 504 object = vm_object_allocate(OBJT_DEFAULT, pindex); 505 506 VM_OBJECT_LOCK(object); 507 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 508 VM_OBJECT_UNLOCK(object); 509 } 510 return (object); 511 } 512 513 /* 514 * SWAP_PAGER_DEALLOC() - remove swap metadata from object 515 * 516 * The swap backing for the object is destroyed. The code is 517 * designed such that we can reinstantiate it later, but this 518 * routine is typically called only when the entire object is 519 * about to be destroyed. 520 * 521 * This routine may block, but no longer does. 522 * 523 * The object must be locked or unreferenceable. 524 */ 525 static void 526 swap_pager_dealloc(vm_object_t object) 527 { 528 529 /* 530 * Remove from list right away so lookups will fail if we block for 531 * pageout completion. 532 */ 533 if (object->handle != NULL) { 534 mtx_lock(&sw_alloc_mtx); 535 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list); 536 mtx_unlock(&sw_alloc_mtx); 537 } 538 539 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 540 vm_object_pip_wait(object, "swpdea"); 541 542 /* 543 * Free all remaining metadata. We only bother to free it from 544 * the swap meta data. We do not attempt to free swapblk's still 545 * associated with vm_page_t's for this object. We do not care 546 * if paging is still in progress on some objects. 547 */ 548 swp_pager_meta_free_all(object); 549 } 550 551 /************************************************************************ 552 * SWAP PAGER BITMAP ROUTINES * 553 ************************************************************************/ 554 555 /* 556 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 557 * 558 * Allocate swap for the requested number of pages. The starting 559 * swap block number (a page index) is returned or SWAPBLK_NONE 560 * if the allocation failed. 561 * 562 * Also has the side effect of advising that somebody made a mistake 563 * when they configured swap and didn't configure enough. 564 * 565 * Must be called at splvm() to avoid races with bitmap frees from 566 * vm_page_remove() aka swap_pager_page_removed(). 567 * 568 * This routine may not block 569 * This routine must be called at splvm(). 570 * 571 * We allocate in round-robin fashion from the configured devices. 572 */ 573 static daddr_t 574 swp_pager_getswapspace(int npages) 575 { 576 daddr_t blk; 577 struct swdevt *sp; 578 int i; 579 580 blk = SWAPBLK_NONE; 581 mtx_lock(&sw_dev_mtx); 582 sp = swdevhd; 583 for (i = 0; i < nswapdev; i++) { 584 if (sp == NULL) 585 sp = TAILQ_FIRST(&swtailq); 586 if (!(sp->sw_flags & SW_CLOSING)) { 587 blk = blist_alloc(sp->sw_blist, npages); 588 if (blk != SWAPBLK_NONE) { 589 blk += sp->sw_first; 590 sp->sw_used += npages; 591 swap_pager_avail -= npages; 592 swp_sizecheck(); 593 swdevhd = TAILQ_NEXT(sp, sw_list); 594 goto done; 595 } 596 } 597 sp = TAILQ_NEXT(sp, sw_list); 598 } 599 if (swap_pager_full != 2) { 600 printf("swap_pager_getswapspace(%d): failed\n", npages); 601 swap_pager_full = 2; 602 swap_pager_almost_full = 1; 603 } 604 swdevhd = NULL; 605 done: 606 mtx_unlock(&sw_dev_mtx); 607 return (blk); 608 } 609 610 static int 611 swp_pager_isondev(daddr_t blk, struct swdevt *sp) 612 { 613 614 return (blk >= sp->sw_first && blk < sp->sw_end); 615 } 616 617 static void 618 swp_pager_strategy(struct buf *bp) 619 { 620 struct swdevt *sp; 621 622 mtx_lock(&sw_dev_mtx); 623 TAILQ_FOREACH(sp, &swtailq, sw_list) { 624 if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) { 625 mtx_unlock(&sw_dev_mtx); 626 sp->sw_strategy(bp, sp); 627 return; 628 } 629 } 630 panic("Swapdev not found"); 631 } 632 633 634 /* 635 * SWP_PAGER_FREESWAPSPACE() - free raw swap space 636 * 637 * This routine returns the specified swap blocks back to the bitmap. 638 * 639 * Note: This routine may not block (it could in the old swap code), 640 * and through the use of the new blist routines it does not block. 641 * 642 * We must be called at splvm() to avoid races with bitmap frees from 643 * vm_page_remove() aka swap_pager_page_removed(). 644 * 645 * This routine may not block 646 * This routine must be called at splvm(). 647 */ 648 static void 649 swp_pager_freeswapspace(daddr_t blk, int npages) 650 { 651 struct swdevt *sp; 652 653 mtx_lock(&sw_dev_mtx); 654 TAILQ_FOREACH(sp, &swtailq, sw_list) { 655 if (blk >= sp->sw_first && blk < sp->sw_end) { 656 sp->sw_used -= npages; 657 /* 658 * If we are attempting to stop swapping on 659 * this device, we don't want to mark any 660 * blocks free lest they be reused. 661 */ 662 if ((sp->sw_flags & SW_CLOSING) == 0) { 663 blist_free(sp->sw_blist, blk - sp->sw_first, 664 npages); 665 swap_pager_avail += npages; 666 swp_sizecheck(); 667 } 668 mtx_unlock(&sw_dev_mtx); 669 return; 670 } 671 } 672 panic("Swapdev not found"); 673 } 674 675 /* 676 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 677 * range within an object. 678 * 679 * This is a globally accessible routine. 680 * 681 * This routine removes swapblk assignments from swap metadata. 682 * 683 * The external callers of this routine typically have already destroyed 684 * or renamed vm_page_t's associated with this range in the object so 685 * we should be ok. 686 * 687 * This routine may be called at any spl. We up our spl to splvm temporarily 688 * in order to perform the metadata removal. 689 */ 690 void 691 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size) 692 { 693 694 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 695 swp_pager_meta_free(object, start, size); 696 } 697 698 /* 699 * SWAP_PAGER_RESERVE() - reserve swap blocks in object 700 * 701 * Assigns swap blocks to the specified range within the object. The 702 * swap blocks are not zerod. Any previous swap assignment is destroyed. 703 * 704 * Returns 0 on success, -1 on failure. 705 */ 706 int 707 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 708 { 709 int n = 0; 710 daddr_t blk = SWAPBLK_NONE; 711 vm_pindex_t beg = start; /* save start index */ 712 713 VM_OBJECT_LOCK(object); 714 while (size) { 715 if (n == 0) { 716 n = BLIST_MAX_ALLOC; 717 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) { 718 n >>= 1; 719 if (n == 0) { 720 swp_pager_meta_free(object, beg, start - beg); 721 VM_OBJECT_UNLOCK(object); 722 return (-1); 723 } 724 } 725 } 726 swp_pager_meta_build(object, start, blk); 727 --size; 728 ++start; 729 ++blk; 730 --n; 731 } 732 swp_pager_meta_free(object, start, n); 733 VM_OBJECT_UNLOCK(object); 734 return (0); 735 } 736 737 /* 738 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 739 * and destroy the source. 740 * 741 * Copy any valid swapblks from the source to the destination. In 742 * cases where both the source and destination have a valid swapblk, 743 * we keep the destination's. 744 * 745 * This routine is allowed to block. It may block allocating metadata 746 * indirectly through swp_pager_meta_build() or if paging is still in 747 * progress on the source. 748 * 749 * This routine can be called at any spl 750 * 751 * XXX vm_page_collapse() kinda expects us not to block because we 752 * supposedly do not need to allocate memory, but for the moment we 753 * *may* have to get a little memory from the zone allocator, but 754 * it is taken from the interrupt memory. We should be ok. 755 * 756 * The source object contains no vm_page_t's (which is just as well) 757 * 758 * The source object is of type OBJT_SWAP. 759 * 760 * The source and destination objects must be locked or 761 * inaccessible (XXX are they ?) 762 */ 763 void 764 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject, 765 vm_pindex_t offset, int destroysource) 766 { 767 vm_pindex_t i; 768 769 VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED); 770 VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED); 771 772 /* 773 * If destroysource is set, we remove the source object from the 774 * swap_pager internal queue now. 775 */ 776 if (destroysource) { 777 if (srcobject->handle != NULL) { 778 mtx_lock(&sw_alloc_mtx); 779 TAILQ_REMOVE( 780 NOBJLIST(srcobject->handle), 781 srcobject, 782 pager_object_list 783 ); 784 mtx_unlock(&sw_alloc_mtx); 785 } 786 } 787 788 /* 789 * transfer source to destination. 790 */ 791 for (i = 0; i < dstobject->size; ++i) { 792 daddr_t dstaddr; 793 794 /* 795 * Locate (without changing) the swapblk on the destination, 796 * unless it is invalid in which case free it silently, or 797 * if the destination is a resident page, in which case the 798 * source is thrown away. 799 */ 800 dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 801 802 if (dstaddr == SWAPBLK_NONE) { 803 /* 804 * Destination has no swapblk and is not resident, 805 * copy source. 806 */ 807 daddr_t srcaddr; 808 809 srcaddr = swp_pager_meta_ctl( 810 srcobject, 811 i + offset, 812 SWM_POP 813 ); 814 815 if (srcaddr != SWAPBLK_NONE) { 816 /* 817 * swp_pager_meta_build() can sleep. 818 */ 819 vm_object_pip_add(srcobject, 1); 820 VM_OBJECT_UNLOCK(srcobject); 821 vm_object_pip_add(dstobject, 1); 822 swp_pager_meta_build(dstobject, i, srcaddr); 823 vm_object_pip_wakeup(dstobject); 824 VM_OBJECT_LOCK(srcobject); 825 vm_object_pip_wakeup(srcobject); 826 } 827 } else { 828 /* 829 * Destination has valid swapblk or it is represented 830 * by a resident page. We destroy the sourceblock. 831 */ 832 833 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE); 834 } 835 } 836 837 /* 838 * Free left over swap blocks in source. 839 * 840 * We have to revert the type to OBJT_DEFAULT so we do not accidently 841 * double-remove the object from the swap queues. 842 */ 843 if (destroysource) { 844 swp_pager_meta_free_all(srcobject); 845 /* 846 * Reverting the type is not necessary, the caller is going 847 * to destroy srcobject directly, but I'm doing it here 848 * for consistency since we've removed the object from its 849 * queues. 850 */ 851 srcobject->type = OBJT_DEFAULT; 852 } 853 } 854 855 /* 856 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 857 * the requested page. 858 * 859 * We determine whether good backing store exists for the requested 860 * page and return TRUE if it does, FALSE if it doesn't. 861 * 862 * If TRUE, we also try to determine how much valid, contiguous backing 863 * store exists before and after the requested page within a reasonable 864 * distance. We do not try to restrict it to the swap device stripe 865 * (that is handled in getpages/putpages). It probably isn't worth 866 * doing here. 867 */ 868 static boolean_t 869 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) 870 { 871 daddr_t blk0; 872 873 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 874 /* 875 * do we have good backing store at the requested index ? 876 */ 877 blk0 = swp_pager_meta_ctl(object, pindex, 0); 878 879 if (blk0 == SWAPBLK_NONE) { 880 if (before) 881 *before = 0; 882 if (after) 883 *after = 0; 884 return (FALSE); 885 } 886 887 /* 888 * find backwards-looking contiguous good backing store 889 */ 890 if (before != NULL) { 891 int i; 892 893 for (i = 1; i < (SWB_NPAGES/2); ++i) { 894 daddr_t blk; 895 896 if (i > pindex) 897 break; 898 blk = swp_pager_meta_ctl(object, pindex - i, 0); 899 if (blk != blk0 - i) 900 break; 901 } 902 *before = (i - 1); 903 } 904 905 /* 906 * find forward-looking contiguous good backing store 907 */ 908 if (after != NULL) { 909 int i; 910 911 for (i = 1; i < (SWB_NPAGES/2); ++i) { 912 daddr_t blk; 913 914 blk = swp_pager_meta_ctl(object, pindex + i, 0); 915 if (blk != blk0 + i) 916 break; 917 } 918 *after = (i - 1); 919 } 920 return (TRUE); 921 } 922 923 /* 924 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 925 * 926 * This removes any associated swap backing store, whether valid or 927 * not, from the page. 928 * 929 * This routine is typically called when a page is made dirty, at 930 * which point any associated swap can be freed. MADV_FREE also 931 * calls us in a special-case situation 932 * 933 * NOTE!!! If the page is clean and the swap was valid, the caller 934 * should make the page dirty before calling this routine. This routine 935 * does NOT change the m->dirty status of the page. Also: MADV_FREE 936 * depends on it. 937 * 938 * This routine may not block 939 * This routine must be called at splvm() 940 */ 941 static void 942 swap_pager_unswapped(vm_page_t m) 943 { 944 945 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 946 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 947 } 948 949 /* 950 * SWAP_PAGER_GETPAGES() - bring pages in from swap 951 * 952 * Attempt to retrieve (m, count) pages from backing store, but make 953 * sure we retrieve at least m[reqpage]. We try to load in as large 954 * a chunk surrounding m[reqpage] as is contiguous in swap and which 955 * belongs to the same object. 956 * 957 * The code is designed for asynchronous operation and 958 * immediate-notification of 'reqpage' but tends not to be 959 * used that way. Please do not optimize-out this algorithmic 960 * feature, I intend to improve on it in the future. 961 * 962 * The parent has a single vm_object_pip_add() reference prior to 963 * calling us and we should return with the same. 964 * 965 * The parent has BUSY'd the pages. We should return with 'm' 966 * left busy, but the others adjusted. 967 */ 968 static int 969 swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage) 970 { 971 struct buf *bp; 972 vm_page_t mreq; 973 int i; 974 int j; 975 daddr_t blk; 976 977 mreq = m[reqpage]; 978 979 KASSERT(mreq->object == object, 980 ("swap_pager_getpages: object mismatch %p/%p", 981 object, mreq->object)); 982 983 /* 984 * Calculate range to retrieve. The pages have already been assigned 985 * their swapblks. We require a *contiguous* range but we know it to 986 * not span devices. If we do not supply it, bad things 987 * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 988 * loops are set up such that the case(s) are handled implicitly. 989 * 990 * The swp_*() calls must be made at splvm(). vm_page_free() does 991 * not need to be, but it will go a little faster if it is. 992 */ 993 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 994 995 for (i = reqpage - 1; i >= 0; --i) { 996 daddr_t iblk; 997 998 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0); 999 if (blk != iblk + (reqpage - i)) 1000 break; 1001 } 1002 ++i; 1003 1004 for (j = reqpage + 1; j < count; ++j) { 1005 daddr_t jblk; 1006 1007 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0); 1008 if (blk != jblk - (j - reqpage)) 1009 break; 1010 } 1011 1012 /* 1013 * free pages outside our collection range. Note: we never free 1014 * mreq, it must remain busy throughout. 1015 */ 1016 if (0 < i || j < count) { 1017 int k; 1018 1019 vm_page_lock_queues(); 1020 for (k = 0; k < i; ++k) 1021 vm_page_free(m[k]); 1022 for (k = j; k < count; ++k) 1023 vm_page_free(m[k]); 1024 vm_page_unlock_queues(); 1025 } 1026 1027 /* 1028 * Return VM_PAGER_FAIL if we have nothing to do. Return mreq 1029 * still busy, but the others unbusied. 1030 */ 1031 if (blk == SWAPBLK_NONE) 1032 return (VM_PAGER_FAIL); 1033 1034 /* 1035 * Getpbuf() can sleep. 1036 */ 1037 VM_OBJECT_UNLOCK(object); 1038 /* 1039 * Get a swap buffer header to perform the IO 1040 */ 1041 bp = getpbuf(&nsw_rcount); 1042 bp->b_flags |= B_PAGING; 1043 1044 /* 1045 * map our page(s) into kva for input 1046 */ 1047 pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i); 1048 1049 bp->b_iocmd = BIO_READ; 1050 bp->b_iodone = swp_pager_async_iodone; 1051 bp->b_rcred = crhold(thread0.td_ucred); 1052 bp->b_wcred = crhold(thread0.td_ucred); 1053 bp->b_blkno = blk - (reqpage - i); 1054 bp->b_bcount = PAGE_SIZE * (j - i); 1055 bp->b_bufsize = PAGE_SIZE * (j - i); 1056 bp->b_pager.pg_reqpage = reqpage - i; 1057 1058 VM_OBJECT_LOCK(object); 1059 { 1060 int k; 1061 1062 for (k = i; k < j; ++k) { 1063 bp->b_pages[k - i] = m[k]; 1064 m[k]->oflags |= VPO_SWAPINPROG; 1065 } 1066 } 1067 bp->b_npages = j - i; 1068 1069 cnt.v_swapin++; 1070 cnt.v_swappgsin += bp->b_npages; 1071 1072 /* 1073 * We still hold the lock on mreq, and our automatic completion routine 1074 * does not remove it. 1075 */ 1076 vm_object_pip_add(object, bp->b_npages); 1077 VM_OBJECT_UNLOCK(object); 1078 1079 /* 1080 * perform the I/O. NOTE!!! bp cannot be considered valid after 1081 * this point because we automatically release it on completion. 1082 * Instead, we look at the one page we are interested in which we 1083 * still hold a lock on even through the I/O completion. 1084 * 1085 * The other pages in our m[] array are also released on completion, 1086 * so we cannot assume they are valid anymore either. 1087 * 1088 * NOTE: b_blkno is destroyed by the call to swapdev_strategy 1089 */ 1090 BUF_KERNPROC(bp); 1091 swp_pager_strategy(bp); 1092 1093 /* 1094 * wait for the page we want to complete. VPO_SWAPINPROG is always 1095 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 1096 * is set in the meta-data. 1097 */ 1098 VM_OBJECT_LOCK(object); 1099 while ((mreq->oflags & VPO_SWAPINPROG) != 0) { 1100 mreq->oflags |= VPO_WANTED; 1101 vm_page_lock_queues(); 1102 vm_page_flag_set(mreq, PG_REFERENCED); 1103 vm_page_unlock_queues(); 1104 cnt.v_intrans++; 1105 if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) { 1106 printf( 1107 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n", 1108 bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount); 1109 } 1110 } 1111 1112 /* 1113 * mreq is left busied after completion, but all the other pages 1114 * are freed. If we had an unrecoverable read error the page will 1115 * not be valid. 1116 */ 1117 if (mreq->valid != VM_PAGE_BITS_ALL) { 1118 return (VM_PAGER_ERROR); 1119 } else { 1120 return (VM_PAGER_OK); 1121 } 1122 1123 /* 1124 * A final note: in a low swap situation, we cannot deallocate swap 1125 * and mark a page dirty here because the caller is likely to mark 1126 * the page clean when we return, causing the page to possibly revert 1127 * to all-zero's later. 1128 */ 1129 } 1130 1131 /* 1132 * swap_pager_putpages: 1133 * 1134 * Assign swap (if necessary) and initiate I/O on the specified pages. 1135 * 1136 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 1137 * are automatically converted to SWAP objects. 1138 * 1139 * In a low memory situation we may block in VOP_STRATEGY(), but the new 1140 * vm_page reservation system coupled with properly written VFS devices 1141 * should ensure that no low-memory deadlock occurs. This is an area 1142 * which needs work. 1143 * 1144 * The parent has N vm_object_pip_add() references prior to 1145 * calling us and will remove references for rtvals[] that are 1146 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1147 * completion. 1148 * 1149 * The parent has soft-busy'd the pages it passes us and will unbusy 1150 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1151 * We need to unbusy the rest on I/O completion. 1152 */ 1153 void 1154 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, 1155 boolean_t sync, int *rtvals) 1156 { 1157 int i; 1158 int n = 0; 1159 1160 GIANT_REQUIRED; 1161 if (count && m[0]->object != object) { 1162 panic("swap_pager_getpages: object mismatch %p/%p", 1163 object, 1164 m[0]->object 1165 ); 1166 } 1167 1168 /* 1169 * Step 1 1170 * 1171 * Turn object into OBJT_SWAP 1172 * check for bogus sysops 1173 * force sync if not pageout process 1174 */ 1175 if (object->type != OBJT_SWAP) 1176 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 1177 VM_OBJECT_UNLOCK(object); 1178 1179 if (curproc != pageproc) 1180 sync = TRUE; 1181 1182 /* 1183 * Step 2 1184 * 1185 * Update nsw parameters from swap_async_max sysctl values. 1186 * Do not let the sysop crash the machine with bogus numbers. 1187 */ 1188 mtx_lock(&pbuf_mtx); 1189 if (swap_async_max != nsw_wcount_async_max) { 1190 int n; 1191 1192 /* 1193 * limit range 1194 */ 1195 if ((n = swap_async_max) > nswbuf / 2) 1196 n = nswbuf / 2; 1197 if (n < 1) 1198 n = 1; 1199 swap_async_max = n; 1200 1201 /* 1202 * Adjust difference ( if possible ). If the current async 1203 * count is too low, we may not be able to make the adjustment 1204 * at this time. 1205 */ 1206 n -= nsw_wcount_async_max; 1207 if (nsw_wcount_async + n >= 0) { 1208 nsw_wcount_async += n; 1209 nsw_wcount_async_max += n; 1210 wakeup(&nsw_wcount_async); 1211 } 1212 } 1213 mtx_unlock(&pbuf_mtx); 1214 1215 /* 1216 * Step 3 1217 * 1218 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1219 * The page is left dirty until the pageout operation completes 1220 * successfully. 1221 */ 1222 for (i = 0; i < count; i += n) { 1223 int j; 1224 struct buf *bp; 1225 daddr_t blk; 1226 1227 /* 1228 * Maximum I/O size is limited by a number of factors. 1229 */ 1230 n = min(BLIST_MAX_ALLOC, count - i); 1231 n = min(n, nsw_cluster_max); 1232 1233 /* 1234 * Get biggest block of swap we can. If we fail, fall 1235 * back and try to allocate a smaller block. Don't go 1236 * overboard trying to allocate space if it would overly 1237 * fragment swap. 1238 */ 1239 while ( 1240 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE && 1241 n > 4 1242 ) { 1243 n >>= 1; 1244 } 1245 if (blk == SWAPBLK_NONE) { 1246 for (j = 0; j < n; ++j) 1247 rtvals[i+j] = VM_PAGER_FAIL; 1248 continue; 1249 } 1250 1251 /* 1252 * All I/O parameters have been satisfied, build the I/O 1253 * request and assign the swap space. 1254 */ 1255 if (sync == TRUE) { 1256 bp = getpbuf(&nsw_wcount_sync); 1257 } else { 1258 bp = getpbuf(&nsw_wcount_async); 1259 bp->b_flags = B_ASYNC; 1260 } 1261 bp->b_flags |= B_PAGING; 1262 bp->b_iocmd = BIO_WRITE; 1263 1264 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 1265 1266 bp->b_rcred = crhold(thread0.td_ucred); 1267 bp->b_wcred = crhold(thread0.td_ucred); 1268 bp->b_bcount = PAGE_SIZE * n; 1269 bp->b_bufsize = PAGE_SIZE * n; 1270 bp->b_blkno = blk; 1271 1272 VM_OBJECT_LOCK(object); 1273 for (j = 0; j < n; ++j) { 1274 vm_page_t mreq = m[i+j]; 1275 1276 swp_pager_meta_build( 1277 mreq->object, 1278 mreq->pindex, 1279 blk + j 1280 ); 1281 vm_page_dirty(mreq); 1282 rtvals[i+j] = VM_PAGER_OK; 1283 1284 mreq->oflags |= VPO_SWAPINPROG; 1285 bp->b_pages[j] = mreq; 1286 } 1287 VM_OBJECT_UNLOCK(object); 1288 bp->b_npages = n; 1289 /* 1290 * Must set dirty range for NFS to work. 1291 */ 1292 bp->b_dirtyoff = 0; 1293 bp->b_dirtyend = bp->b_bcount; 1294 1295 cnt.v_swapout++; 1296 cnt.v_swappgsout += bp->b_npages; 1297 1298 /* 1299 * asynchronous 1300 * 1301 * NOTE: b_blkno is destroyed by the call to swapdev_strategy 1302 */ 1303 if (sync == FALSE) { 1304 bp->b_iodone = swp_pager_async_iodone; 1305 BUF_KERNPROC(bp); 1306 swp_pager_strategy(bp); 1307 1308 for (j = 0; j < n; ++j) 1309 rtvals[i+j] = VM_PAGER_PEND; 1310 /* restart outter loop */ 1311 continue; 1312 } 1313 1314 /* 1315 * synchronous 1316 * 1317 * NOTE: b_blkno is destroyed by the call to swapdev_strategy 1318 */ 1319 bp->b_iodone = bdone; 1320 swp_pager_strategy(bp); 1321 1322 /* 1323 * Wait for the sync I/O to complete, then update rtvals. 1324 * We just set the rtvals[] to VM_PAGER_PEND so we can call 1325 * our async completion routine at the end, thus avoiding a 1326 * double-free. 1327 */ 1328 bwait(bp, PVM, "swwrt"); 1329 for (j = 0; j < n; ++j) 1330 rtvals[i+j] = VM_PAGER_PEND; 1331 /* 1332 * Now that we are through with the bp, we can call the 1333 * normal async completion, which frees everything up. 1334 */ 1335 swp_pager_async_iodone(bp); 1336 } 1337 VM_OBJECT_LOCK(object); 1338 } 1339 1340 /* 1341 * swp_pager_async_iodone: 1342 * 1343 * Completion routine for asynchronous reads and writes from/to swap. 1344 * Also called manually by synchronous code to finish up a bp. 1345 * 1346 * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 1347 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 1348 * unbusy all pages except the 'main' request page. For WRITE 1349 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 1350 * because we marked them all VM_PAGER_PEND on return from putpages ). 1351 * 1352 * This routine may not block. 1353 * This routine is called at splbio() or better 1354 * 1355 * We up ourselves to splvm() as required for various vm_page related 1356 * calls. 1357 */ 1358 static void 1359 swp_pager_async_iodone(struct buf *bp) 1360 { 1361 int i; 1362 vm_object_t object = NULL; 1363 1364 /* 1365 * report error 1366 */ 1367 if (bp->b_ioflags & BIO_ERROR) { 1368 printf( 1369 "swap_pager: I/O error - %s failed; blkno %ld," 1370 "size %ld, error %d\n", 1371 ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"), 1372 (long)bp->b_blkno, 1373 (long)bp->b_bcount, 1374 bp->b_error 1375 ); 1376 } 1377 1378 /* 1379 * remove the mapping for kernel virtual 1380 */ 1381 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1382 1383 if (bp->b_npages) { 1384 object = bp->b_pages[0]->object; 1385 VM_OBJECT_LOCK(object); 1386 } 1387 vm_page_lock_queues(); 1388 /* 1389 * cleanup pages. If an error occurs writing to swap, we are in 1390 * very serious trouble. If it happens to be a disk error, though, 1391 * we may be able to recover by reassigning the swap later on. So 1392 * in this case we remove the m->swapblk assignment for the page 1393 * but do not free it in the rlist. The errornous block(s) are thus 1394 * never reallocated as swap. Redirty the page and continue. 1395 */ 1396 for (i = 0; i < bp->b_npages; ++i) { 1397 vm_page_t m = bp->b_pages[i]; 1398 1399 m->oflags &= ~VPO_SWAPINPROG; 1400 1401 if (bp->b_ioflags & BIO_ERROR) { 1402 /* 1403 * If an error occurs I'd love to throw the swapblk 1404 * away without freeing it back to swapspace, so it 1405 * can never be used again. But I can't from an 1406 * interrupt. 1407 */ 1408 if (bp->b_iocmd == BIO_READ) { 1409 /* 1410 * When reading, reqpage needs to stay 1411 * locked for the parent, but all other 1412 * pages can be freed. We still want to 1413 * wakeup the parent waiting on the page, 1414 * though. ( also: pg_reqpage can be -1 and 1415 * not match anything ). 1416 * 1417 * We have to wake specifically requested pages 1418 * up too because we cleared VPO_SWAPINPROG and 1419 * someone may be waiting for that. 1420 * 1421 * NOTE: for reads, m->dirty will probably 1422 * be overridden by the original caller of 1423 * getpages so don't play cute tricks here. 1424 */ 1425 m->valid = 0; 1426 if (i != bp->b_pager.pg_reqpage) 1427 vm_page_free(m); 1428 else 1429 vm_page_flash(m); 1430 /* 1431 * If i == bp->b_pager.pg_reqpage, do not wake 1432 * the page up. The caller needs to. 1433 */ 1434 } else { 1435 /* 1436 * If a write error occurs, reactivate page 1437 * so it doesn't clog the inactive list, 1438 * then finish the I/O. 1439 */ 1440 vm_page_dirty(m); 1441 vm_page_activate(m); 1442 vm_page_io_finish(m); 1443 } 1444 } else if (bp->b_iocmd == BIO_READ) { 1445 /* 1446 * For read success, clear dirty bits. Nobody should 1447 * have this page mapped but don't take any chances, 1448 * make sure the pmap modify bits are also cleared. 1449 * 1450 * NOTE: for reads, m->dirty will probably be 1451 * overridden by the original caller of getpages so 1452 * we cannot set them in order to free the underlying 1453 * swap in a low-swap situation. I don't think we'd 1454 * want to do that anyway, but it was an optimization 1455 * that existed in the old swapper for a time before 1456 * it got ripped out due to precisely this problem. 1457 * 1458 * If not the requested page then deactivate it. 1459 * 1460 * Note that the requested page, reqpage, is left 1461 * busied, but we still have to wake it up. The 1462 * other pages are released (unbusied) by 1463 * vm_page_wakeup(). We do not set reqpage's 1464 * valid bits here, it is up to the caller. 1465 */ 1466 pmap_clear_modify(m); 1467 m->valid = VM_PAGE_BITS_ALL; 1468 vm_page_undirty(m); 1469 1470 /* 1471 * We have to wake specifically requested pages 1472 * up too because we cleared VPO_SWAPINPROG and 1473 * could be waiting for it in getpages. However, 1474 * be sure to not unbusy getpages specifically 1475 * requested page - getpages expects it to be 1476 * left busy. 1477 */ 1478 if (i != bp->b_pager.pg_reqpage) { 1479 vm_page_deactivate(m); 1480 vm_page_wakeup(m); 1481 } else { 1482 vm_page_flash(m); 1483 } 1484 } else { 1485 /* 1486 * For write success, clear the modify and dirty 1487 * status, then finish the I/O ( which decrements the 1488 * busy count and possibly wakes waiter's up ). 1489 */ 1490 pmap_clear_modify(m); 1491 vm_page_undirty(m); 1492 vm_page_io_finish(m); 1493 if (vm_page_count_severe()) 1494 vm_page_try_to_cache(m); 1495 } 1496 } 1497 vm_page_unlock_queues(); 1498 1499 /* 1500 * adjust pip. NOTE: the original parent may still have its own 1501 * pip refs on the object. 1502 */ 1503 if (object != NULL) { 1504 vm_object_pip_wakeupn(object, bp->b_npages); 1505 VM_OBJECT_UNLOCK(object); 1506 } 1507 1508 /* 1509 * swapdev_strategy() manually sets b_vp and b_bufobj before calling 1510 * bstrategy(). Set them back to NULL now we're done with it, or we'll 1511 * trigger a KASSERT in relpbuf(). 1512 */ 1513 if (bp->b_vp) { 1514 bp->b_vp = NULL; 1515 bp->b_bufobj = NULL; 1516 } 1517 /* 1518 * release the physical I/O buffer 1519 */ 1520 relpbuf( 1521 bp, 1522 ((bp->b_iocmd == BIO_READ) ? &nsw_rcount : 1523 ((bp->b_flags & B_ASYNC) ? 1524 &nsw_wcount_async : 1525 &nsw_wcount_sync 1526 ) 1527 ) 1528 ); 1529 } 1530 1531 /* 1532 * swap_pager_isswapped: 1533 * 1534 * Return 1 if at least one page in the given object is paged 1535 * out to the given swap device. 1536 * 1537 * This routine may not block. 1538 */ 1539 int 1540 swap_pager_isswapped(vm_object_t object, struct swdevt *sp) 1541 { 1542 daddr_t index = 0; 1543 int bcount; 1544 int i; 1545 1546 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1547 if (object->type != OBJT_SWAP) 1548 return (0); 1549 1550 mtx_lock(&swhash_mtx); 1551 for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) { 1552 struct swblock *swap; 1553 1554 if ((swap = *swp_pager_hash(object, index)) != NULL) { 1555 for (i = 0; i < SWAP_META_PAGES; ++i) { 1556 if (swp_pager_isondev(swap->swb_pages[i], sp)) { 1557 mtx_unlock(&swhash_mtx); 1558 return (1); 1559 } 1560 } 1561 } 1562 index += SWAP_META_PAGES; 1563 if (index > 0x20000000) 1564 panic("swap_pager_isswapped: failed to locate all swap meta blocks"); 1565 } 1566 mtx_unlock(&swhash_mtx); 1567 return (0); 1568 } 1569 1570 /* 1571 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in 1572 * 1573 * This routine dissociates the page at the given index within a 1574 * swap block from its backing store, paging it in if necessary. 1575 * If the page is paged in, it is placed in the inactive queue, 1576 * since it had its backing store ripped out from under it. 1577 * We also attempt to swap in all other pages in the swap block, 1578 * we only guarantee that the one at the specified index is 1579 * paged in. 1580 * 1581 * XXX - The code to page the whole block in doesn't work, so we 1582 * revert to the one-by-one behavior for now. Sigh. 1583 */ 1584 static inline void 1585 swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex) 1586 { 1587 vm_page_t m; 1588 1589 vm_object_pip_add(object, 1); 1590 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY); 1591 if (m->valid == VM_PAGE_BITS_ALL) { 1592 vm_object_pip_subtract(object, 1); 1593 vm_page_lock_queues(); 1594 vm_page_activate(m); 1595 vm_page_dirty(m); 1596 vm_page_wakeup(m); 1597 vm_page_unlock_queues(); 1598 vm_pager_page_unswapped(m); 1599 return; 1600 } 1601 1602 if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK) 1603 panic("swap_pager_force_pagein: read from swap failed");/*XXX*/ 1604 vm_object_pip_subtract(object, 1); 1605 vm_page_lock_queues(); 1606 vm_page_dirty(m); 1607 vm_page_dontneed(m); 1608 vm_page_wakeup(m); 1609 vm_page_unlock_queues(); 1610 vm_pager_page_unswapped(m); 1611 } 1612 1613 /* 1614 * swap_pager_swapoff: 1615 * 1616 * Page in all of the pages that have been paged out to the 1617 * given device. The corresponding blocks in the bitmap must be 1618 * marked as allocated and the device must be flagged SW_CLOSING. 1619 * There may be no processes swapped out to the device. 1620 * 1621 * This routine may block. 1622 */ 1623 static void 1624 swap_pager_swapoff(struct swdevt *sp) 1625 { 1626 struct swblock *swap; 1627 int i, j, retries; 1628 1629 GIANT_REQUIRED; 1630 1631 retries = 0; 1632 full_rescan: 1633 mtx_lock(&swhash_mtx); 1634 for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */ 1635 restart: 1636 for (swap = swhash[i]; swap != NULL; swap = swap->swb_hnext) { 1637 vm_object_t object = swap->swb_object; 1638 vm_pindex_t pindex = swap->swb_index; 1639 for (j = 0; j < SWAP_META_PAGES; ++j) { 1640 if (swp_pager_isondev(swap->swb_pages[j], sp)) { 1641 /* avoid deadlock */ 1642 if (!VM_OBJECT_TRYLOCK(object)) { 1643 break; 1644 } else { 1645 mtx_unlock(&swhash_mtx); 1646 swp_pager_force_pagein(object, 1647 pindex + j); 1648 VM_OBJECT_UNLOCK(object); 1649 mtx_lock(&swhash_mtx); 1650 goto restart; 1651 } 1652 } 1653 } 1654 } 1655 } 1656 mtx_unlock(&swhash_mtx); 1657 if (sp->sw_used) { 1658 int dummy; 1659 /* 1660 * Objects may be locked or paging to the device being 1661 * removed, so we will miss their pages and need to 1662 * make another pass. We have marked this device as 1663 * SW_CLOSING, so the activity should finish soon. 1664 */ 1665 retries++; 1666 if (retries > 100) { 1667 panic("swapoff: failed to locate %d swap blocks", 1668 sp->sw_used); 1669 } 1670 tsleep(&dummy, PVM, "swpoff", hz / 20); 1671 goto full_rescan; 1672 } 1673 } 1674 1675 /************************************************************************ 1676 * SWAP META DATA * 1677 ************************************************************************ 1678 * 1679 * These routines manipulate the swap metadata stored in the 1680 * OBJT_SWAP object. All swp_*() routines must be called at 1681 * splvm() because swap can be freed up by the low level vm_page 1682 * code which might be called from interrupts beyond what splbio() covers. 1683 * 1684 * Swap metadata is implemented with a global hash and not directly 1685 * linked into the object. Instead the object simply contains 1686 * appropriate tracking counters. 1687 */ 1688 1689 /* 1690 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 1691 * 1692 * We first convert the object to a swap object if it is a default 1693 * object. 1694 * 1695 * The specified swapblk is added to the object's swap metadata. If 1696 * the swapblk is not valid, it is freed instead. Any previously 1697 * assigned swapblk is freed. 1698 * 1699 * This routine must be called at splvm(), except when used to convert 1700 * an OBJT_DEFAULT object into an OBJT_SWAP object. 1701 */ 1702 static void 1703 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk) 1704 { 1705 struct swblock *swap; 1706 struct swblock **pswap; 1707 int idx; 1708 1709 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1710 /* 1711 * Convert default object to swap object if necessary 1712 */ 1713 if (object->type != OBJT_SWAP) { 1714 object->type = OBJT_SWAP; 1715 object->un_pager.swp.swp_bcount = 0; 1716 1717 if (object->handle != NULL) { 1718 mtx_lock(&sw_alloc_mtx); 1719 TAILQ_INSERT_TAIL( 1720 NOBJLIST(object->handle), 1721 object, 1722 pager_object_list 1723 ); 1724 mtx_unlock(&sw_alloc_mtx); 1725 } 1726 } 1727 1728 /* 1729 * Locate hash entry. If not found create, but if we aren't adding 1730 * anything just return. If we run out of space in the map we wait 1731 * and, since the hash table may have changed, retry. 1732 */ 1733 retry: 1734 mtx_lock(&swhash_mtx); 1735 pswap = swp_pager_hash(object, pindex); 1736 1737 if ((swap = *pswap) == NULL) { 1738 int i; 1739 1740 if (swapblk == SWAPBLK_NONE) 1741 goto done; 1742 1743 swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT); 1744 if (swap == NULL) { 1745 mtx_unlock(&swhash_mtx); 1746 VM_OBJECT_UNLOCK(object); 1747 VM_WAIT; 1748 VM_OBJECT_LOCK(object); 1749 goto retry; 1750 } 1751 1752 swap->swb_hnext = NULL; 1753 swap->swb_object = object; 1754 swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK; 1755 swap->swb_count = 0; 1756 1757 ++object->un_pager.swp.swp_bcount; 1758 1759 for (i = 0; i < SWAP_META_PAGES; ++i) 1760 swap->swb_pages[i] = SWAPBLK_NONE; 1761 } 1762 1763 /* 1764 * Delete prior contents of metadata 1765 */ 1766 idx = pindex & SWAP_META_MASK; 1767 1768 if (swap->swb_pages[idx] != SWAPBLK_NONE) { 1769 swp_pager_freeswapspace(swap->swb_pages[idx], 1); 1770 --swap->swb_count; 1771 } 1772 1773 /* 1774 * Enter block into metadata 1775 */ 1776 swap->swb_pages[idx] = swapblk; 1777 if (swapblk != SWAPBLK_NONE) 1778 ++swap->swb_count; 1779 done: 1780 mtx_unlock(&swhash_mtx); 1781 } 1782 1783 /* 1784 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 1785 * 1786 * The requested range of blocks is freed, with any associated swap 1787 * returned to the swap bitmap. 1788 * 1789 * This routine will free swap metadata structures as they are cleaned 1790 * out. This routine does *NOT* operate on swap metadata associated 1791 * with resident pages. 1792 * 1793 * This routine must be called at splvm() 1794 */ 1795 static void 1796 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count) 1797 { 1798 1799 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1800 if (object->type != OBJT_SWAP) 1801 return; 1802 1803 while (count > 0) { 1804 struct swblock **pswap; 1805 struct swblock *swap; 1806 1807 mtx_lock(&swhash_mtx); 1808 pswap = swp_pager_hash(object, index); 1809 1810 if ((swap = *pswap) != NULL) { 1811 daddr_t v = swap->swb_pages[index & SWAP_META_MASK]; 1812 1813 if (v != SWAPBLK_NONE) { 1814 swp_pager_freeswapspace(v, 1); 1815 swap->swb_pages[index & SWAP_META_MASK] = 1816 SWAPBLK_NONE; 1817 if (--swap->swb_count == 0) { 1818 *pswap = swap->swb_hnext; 1819 uma_zfree(swap_zone, swap); 1820 --object->un_pager.swp.swp_bcount; 1821 } 1822 } 1823 --count; 1824 ++index; 1825 } else { 1826 int n = SWAP_META_PAGES - (index & SWAP_META_MASK); 1827 count -= n; 1828 index += n; 1829 } 1830 mtx_unlock(&swhash_mtx); 1831 } 1832 } 1833 1834 /* 1835 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 1836 * 1837 * This routine locates and destroys all swap metadata associated with 1838 * an object. 1839 * 1840 * This routine must be called at splvm() 1841 */ 1842 static void 1843 swp_pager_meta_free_all(vm_object_t object) 1844 { 1845 daddr_t index = 0; 1846 1847 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1848 if (object->type != OBJT_SWAP) 1849 return; 1850 1851 while (object->un_pager.swp.swp_bcount) { 1852 struct swblock **pswap; 1853 struct swblock *swap; 1854 1855 mtx_lock(&swhash_mtx); 1856 pswap = swp_pager_hash(object, index); 1857 if ((swap = *pswap) != NULL) { 1858 int i; 1859 1860 for (i = 0; i < SWAP_META_PAGES; ++i) { 1861 daddr_t v = swap->swb_pages[i]; 1862 if (v != SWAPBLK_NONE) { 1863 --swap->swb_count; 1864 swp_pager_freeswapspace(v, 1); 1865 } 1866 } 1867 if (swap->swb_count != 0) 1868 panic("swap_pager_meta_free_all: swb_count != 0"); 1869 *pswap = swap->swb_hnext; 1870 uma_zfree(swap_zone, swap); 1871 --object->un_pager.swp.swp_bcount; 1872 } 1873 mtx_unlock(&swhash_mtx); 1874 index += SWAP_META_PAGES; 1875 if (index > 0x20000000) 1876 panic("swp_pager_meta_free_all: failed to locate all swap meta blocks"); 1877 } 1878 } 1879 1880 /* 1881 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 1882 * 1883 * This routine is capable of looking up, popping, or freeing 1884 * swapblk assignments in the swap meta data or in the vm_page_t. 1885 * The routine typically returns the swapblk being looked-up, or popped, 1886 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 1887 * was invalid. This routine will automatically free any invalid 1888 * meta-data swapblks. 1889 * 1890 * It is not possible to store invalid swapblks in the swap meta data 1891 * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 1892 * 1893 * When acting on a busy resident page and paging is in progress, we 1894 * have to wait until paging is complete but otherwise can act on the 1895 * busy page. 1896 * 1897 * This routine must be called at splvm(). 1898 * 1899 * SWM_FREE remove and free swap block from metadata 1900 * SWM_POP remove from meta data but do not free.. pop it out 1901 */ 1902 static daddr_t 1903 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags) 1904 { 1905 struct swblock **pswap; 1906 struct swblock *swap; 1907 daddr_t r1; 1908 int idx; 1909 1910 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1911 /* 1912 * The meta data only exists of the object is OBJT_SWAP 1913 * and even then might not be allocated yet. 1914 */ 1915 if (object->type != OBJT_SWAP) 1916 return (SWAPBLK_NONE); 1917 1918 r1 = SWAPBLK_NONE; 1919 mtx_lock(&swhash_mtx); 1920 pswap = swp_pager_hash(object, pindex); 1921 1922 if ((swap = *pswap) != NULL) { 1923 idx = pindex & SWAP_META_MASK; 1924 r1 = swap->swb_pages[idx]; 1925 1926 if (r1 != SWAPBLK_NONE) { 1927 if (flags & SWM_FREE) { 1928 swp_pager_freeswapspace(r1, 1); 1929 r1 = SWAPBLK_NONE; 1930 } 1931 if (flags & (SWM_FREE|SWM_POP)) { 1932 swap->swb_pages[idx] = SWAPBLK_NONE; 1933 if (--swap->swb_count == 0) { 1934 *pswap = swap->swb_hnext; 1935 uma_zfree(swap_zone, swap); 1936 --object->un_pager.swp.swp_bcount; 1937 } 1938 } 1939 } 1940 } 1941 mtx_unlock(&swhash_mtx); 1942 return (r1); 1943 } 1944 1945 /* 1946 * System call swapon(name) enables swapping on device name, 1947 * which must be in the swdevsw. Return EBUSY 1948 * if already swapping on this device. 1949 */ 1950 #ifndef _SYS_SYSPROTO_H_ 1951 struct swapon_args { 1952 char *name; 1953 }; 1954 #endif 1955 1956 /* 1957 * MPSAFE 1958 */ 1959 /* ARGSUSED */ 1960 int 1961 swapon(struct thread *td, struct swapon_args *uap) 1962 { 1963 struct vattr attr; 1964 struct vnode *vp; 1965 struct nameidata nd; 1966 int error; 1967 1968 mtx_lock(&Giant); 1969 error = suser(td); 1970 if (error) 1971 goto done2; 1972 1973 while (swdev_syscall_active) 1974 tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0); 1975 swdev_syscall_active = 1; 1976 1977 /* 1978 * Swap metadata may not fit in the KVM if we have physical 1979 * memory of >1GB. 1980 */ 1981 if (swap_zone == NULL) { 1982 error = ENOMEM; 1983 goto done; 1984 } 1985 1986 NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW, UIO_USERSPACE, uap->name, td); 1987 error = namei(&nd); 1988 if (error) 1989 goto done; 1990 1991 NDFREE(&nd, NDF_ONLY_PNBUF); 1992 vp = nd.ni_vp; 1993 1994 if (vn_isdisk(vp, &error)) { 1995 error = swapongeom(td, vp); 1996 } else if (vp->v_type == VREG && 1997 (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 && 1998 (error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) == 0) { 1999 /* 2000 * Allow direct swapping to NFS regular files in the same 2001 * way that nfs_mountroot() sets up diskless swapping. 2002 */ 2003 error = swaponvp(td, vp, attr.va_size / DEV_BSIZE); 2004 } 2005 2006 if (error) 2007 vrele(vp); 2008 done: 2009 swdev_syscall_active = 0; 2010 wakeup_one(&swdev_syscall_active); 2011 done2: 2012 mtx_unlock(&Giant); 2013 return (error); 2014 } 2015 2016 static void 2017 swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strategy, sw_close_t *close, dev_t dev) 2018 { 2019 struct swdevt *sp, *tsp; 2020 swblk_t dvbase; 2021 u_long mblocks; 2022 2023 /* 2024 * If we go beyond this, we get overflows in the radix 2025 * tree bitmap code. 2026 */ 2027 mblocks = 0x40000000 / BLIST_META_RADIX; 2028 if (nblks > mblocks) { 2029 printf("WARNING: reducing size to maximum of %lu blocks per swap unit\n", 2030 mblocks); 2031 nblks = mblocks; 2032 } 2033 /* 2034 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks. 2035 * First chop nblks off to page-align it, then convert. 2036 * 2037 * sw->sw_nblks is in page-sized chunks now too. 2038 */ 2039 nblks &= ~(ctodb(1) - 1); 2040 nblks = dbtoc(nblks); 2041 2042 sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO); 2043 sp->sw_vp = vp; 2044 sp->sw_id = id; 2045 sp->sw_dev = dev; 2046 sp->sw_flags = 0; 2047 sp->sw_nblks = nblks; 2048 sp->sw_used = 0; 2049 sp->sw_strategy = strategy; 2050 sp->sw_close = close; 2051 2052 sp->sw_blist = blist_create(nblks); 2053 /* 2054 * Do not free the first two block in order to avoid overwriting 2055 * any bsd label at the front of the partition 2056 */ 2057 blist_free(sp->sw_blist, 2, nblks - 2); 2058 2059 dvbase = 0; 2060 mtx_lock(&sw_dev_mtx); 2061 TAILQ_FOREACH(tsp, &swtailq, sw_list) { 2062 if (tsp->sw_end >= dvbase) { 2063 /* 2064 * We put one uncovered page between the devices 2065 * in order to definitively prevent any cross-device 2066 * I/O requests 2067 */ 2068 dvbase = tsp->sw_end + 1; 2069 } 2070 } 2071 sp->sw_first = dvbase; 2072 sp->sw_end = dvbase + nblks; 2073 TAILQ_INSERT_TAIL(&swtailq, sp, sw_list); 2074 nswapdev++; 2075 swap_pager_avail += nblks; 2076 swp_sizecheck(); 2077 mtx_unlock(&sw_dev_mtx); 2078 } 2079 2080 /* 2081 * SYSCALL: swapoff(devname) 2082 * 2083 * Disable swapping on the given device. 2084 * 2085 * XXX: Badly designed system call: it should use a device index 2086 * rather than filename as specification. We keep sw_vp around 2087 * only to make this work. 2088 */ 2089 #ifndef _SYS_SYSPROTO_H_ 2090 struct swapoff_args { 2091 char *name; 2092 }; 2093 #endif 2094 2095 /* 2096 * MPSAFE 2097 */ 2098 /* ARGSUSED */ 2099 int 2100 swapoff(struct thread *td, struct swapoff_args *uap) 2101 { 2102 struct vnode *vp; 2103 struct nameidata nd; 2104 struct swdevt *sp; 2105 int error; 2106 2107 error = suser(td); 2108 if (error) 2109 return (error); 2110 2111 mtx_lock(&Giant); 2112 while (swdev_syscall_active) 2113 tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0); 2114 swdev_syscall_active = 1; 2115 2116 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td); 2117 error = namei(&nd); 2118 if (error) 2119 goto done; 2120 NDFREE(&nd, NDF_ONLY_PNBUF); 2121 vp = nd.ni_vp; 2122 2123 mtx_lock(&sw_dev_mtx); 2124 TAILQ_FOREACH(sp, &swtailq, sw_list) { 2125 if (sp->sw_vp == vp) 2126 break; 2127 } 2128 mtx_unlock(&sw_dev_mtx); 2129 if (sp == NULL) { 2130 error = EINVAL; 2131 goto done; 2132 } 2133 error = swapoff_one(sp, td); 2134 done: 2135 swdev_syscall_active = 0; 2136 wakeup_one(&swdev_syscall_active); 2137 mtx_unlock(&Giant); 2138 return (error); 2139 } 2140 2141 static int 2142 swapoff_one(struct swdevt *sp, struct thread *td) 2143 { 2144 u_long nblks, dvbase; 2145 #ifdef MAC 2146 int error; 2147 #endif 2148 2149 mtx_assert(&Giant, MA_OWNED); 2150 #ifdef MAC 2151 (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY, td); 2152 error = mac_check_system_swapoff(td->td_ucred, sp->sw_vp); 2153 (void) VOP_UNLOCK(sp->sw_vp, 0, td); 2154 if (error != 0) 2155 return (error); 2156 #endif 2157 nblks = sp->sw_nblks; 2158 2159 /* 2160 * We can turn off this swap device safely only if the 2161 * available virtual memory in the system will fit the amount 2162 * of data we will have to page back in, plus an epsilon so 2163 * the system doesn't become critically low on swap space. 2164 */ 2165 if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail < 2166 nblks + nswap_lowat) { 2167 return (ENOMEM); 2168 } 2169 2170 /* 2171 * Prevent further allocations on this device. 2172 */ 2173 mtx_lock(&sw_dev_mtx); 2174 sp->sw_flags |= SW_CLOSING; 2175 for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) { 2176 swap_pager_avail -= blist_fill(sp->sw_blist, 2177 dvbase, dmmax); 2178 } 2179 mtx_unlock(&sw_dev_mtx); 2180 2181 /* 2182 * Page in the contents of the device and close it. 2183 */ 2184 swap_pager_swapoff(sp); 2185 2186 sp->sw_close(td, sp); 2187 sp->sw_id = NULL; 2188 mtx_lock(&sw_dev_mtx); 2189 TAILQ_REMOVE(&swtailq, sp, sw_list); 2190 nswapdev--; 2191 if (nswapdev == 0) { 2192 swap_pager_full = 2; 2193 swap_pager_almost_full = 1; 2194 } 2195 if (swdevhd == sp) 2196 swdevhd = NULL; 2197 mtx_unlock(&sw_dev_mtx); 2198 blist_destroy(sp->sw_blist); 2199 free(sp, M_VMPGDATA); 2200 return (0); 2201 } 2202 2203 void 2204 swapoff_all(void) 2205 { 2206 struct swdevt *sp, *spt; 2207 const char *devname; 2208 int error; 2209 2210 mtx_lock(&Giant); 2211 while (swdev_syscall_active) 2212 tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0); 2213 swdev_syscall_active = 1; 2214 2215 mtx_lock(&sw_dev_mtx); 2216 TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) { 2217 mtx_unlock(&sw_dev_mtx); 2218 if (vn_isdisk(sp->sw_vp, NULL)) 2219 devname = sp->sw_vp->v_rdev->si_name; 2220 else 2221 devname = "[file]"; 2222 error = swapoff_one(sp, &thread0); 2223 if (error != 0) { 2224 printf("Cannot remove swap device %s (error=%d), " 2225 "skipping.\n", devname, error); 2226 } else if (bootverbose) { 2227 printf("Swap device %s removed.\n", devname); 2228 } 2229 mtx_lock(&sw_dev_mtx); 2230 } 2231 mtx_unlock(&sw_dev_mtx); 2232 2233 swdev_syscall_active = 0; 2234 wakeup_one(&swdev_syscall_active); 2235 mtx_unlock(&Giant); 2236 } 2237 2238 void 2239 swap_pager_status(int *total, int *used) 2240 { 2241 struct swdevt *sp; 2242 2243 *total = 0; 2244 *used = 0; 2245 mtx_lock(&sw_dev_mtx); 2246 TAILQ_FOREACH(sp, &swtailq, sw_list) { 2247 *total += sp->sw_nblks; 2248 *used += sp->sw_used; 2249 } 2250 mtx_unlock(&sw_dev_mtx); 2251 } 2252 2253 static int 2254 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS) 2255 { 2256 int *name = (int *)arg1; 2257 int error, n; 2258 struct xswdev xs; 2259 struct swdevt *sp; 2260 2261 if (arg2 != 1) /* name length */ 2262 return (EINVAL); 2263 2264 n = 0; 2265 mtx_lock(&sw_dev_mtx); 2266 TAILQ_FOREACH(sp, &swtailq, sw_list) { 2267 if (n == *name) { 2268 mtx_unlock(&sw_dev_mtx); 2269 xs.xsw_version = XSWDEV_VERSION; 2270 xs.xsw_dev = sp->sw_dev; 2271 xs.xsw_flags = sp->sw_flags; 2272 xs.xsw_nblks = sp->sw_nblks; 2273 xs.xsw_used = sp->sw_used; 2274 2275 error = SYSCTL_OUT(req, &xs, sizeof(xs)); 2276 return (error); 2277 } 2278 n++; 2279 } 2280 mtx_unlock(&sw_dev_mtx); 2281 return (ENOENT); 2282 } 2283 2284 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0, 2285 "Number of swap devices"); 2286 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info, 2287 "Swap statistics by device"); 2288 2289 /* 2290 * vmspace_swap_count() - count the approximate swap useage in pages for a 2291 * vmspace. 2292 * 2293 * The map must be locked. 2294 * 2295 * Swap useage is determined by taking the proportional swap used by 2296 * VM objects backing the VM map. To make up for fractional losses, 2297 * if the VM object has any swap use at all the associated map entries 2298 * count for at least 1 swap page. 2299 */ 2300 int 2301 vmspace_swap_count(struct vmspace *vmspace) 2302 { 2303 vm_map_t map = &vmspace->vm_map; 2304 vm_map_entry_t cur; 2305 int count = 0; 2306 2307 for (cur = map->header.next; cur != &map->header; cur = cur->next) { 2308 vm_object_t object; 2309 2310 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && 2311 (object = cur->object.vm_object) != NULL) { 2312 VM_OBJECT_LOCK(object); 2313 if (object->type == OBJT_SWAP && 2314 object->un_pager.swp.swp_bcount != 0) { 2315 int n = (cur->end - cur->start) / PAGE_SIZE; 2316 2317 count += object->un_pager.swp.swp_bcount * 2318 SWAP_META_PAGES * n / object->size + 1; 2319 } 2320 VM_OBJECT_UNLOCK(object); 2321 } 2322 } 2323 return (count); 2324 } 2325 2326 /* 2327 * GEOM backend 2328 * 2329 * Swapping onto disk devices. 2330 * 2331 */ 2332 2333 static g_orphan_t swapgeom_orphan; 2334 2335 static struct g_class g_swap_class = { 2336 .name = "SWAP", 2337 .version = G_VERSION, 2338 .orphan = swapgeom_orphan, 2339 }; 2340 2341 DECLARE_GEOM_CLASS(g_swap_class, g_class); 2342 2343 2344 static void 2345 swapgeom_done(struct bio *bp2) 2346 { 2347 struct buf *bp; 2348 2349 bp = bp2->bio_caller2; 2350 bp->b_ioflags = bp2->bio_flags; 2351 if (bp2->bio_error) 2352 bp->b_ioflags |= BIO_ERROR; 2353 bp->b_resid = bp->b_bcount - bp2->bio_completed; 2354 bp->b_error = bp2->bio_error; 2355 bufdone(bp); 2356 g_destroy_bio(bp2); 2357 } 2358 2359 static void 2360 swapgeom_strategy(struct buf *bp, struct swdevt *sp) 2361 { 2362 struct bio *bio; 2363 struct g_consumer *cp; 2364 2365 cp = sp->sw_id; 2366 if (cp == NULL) { 2367 bp->b_error = ENXIO; 2368 bp->b_ioflags |= BIO_ERROR; 2369 bufdone(bp); 2370 return; 2371 } 2372 bio = g_alloc_bio(); 2373 #if 0 2374 /* 2375 * XXX: We shouldn't really sleep here when we run out of buffers 2376 * XXX: but the alternative is worse right now. 2377 */ 2378 if (bio == NULL) { 2379 bp->b_error = ENOMEM; 2380 bp->b_ioflags |= BIO_ERROR; 2381 bufdone(bp); 2382 return; 2383 } 2384 #endif 2385 bio->bio_caller2 = bp; 2386 bio->bio_cmd = bp->b_iocmd; 2387 bio->bio_data = bp->b_data; 2388 bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE; 2389 bio->bio_length = bp->b_bcount; 2390 bio->bio_done = swapgeom_done; 2391 g_io_request(bio, cp); 2392 return; 2393 } 2394 2395 static void 2396 swapgeom_orphan(struct g_consumer *cp) 2397 { 2398 struct swdevt *sp; 2399 2400 mtx_lock(&sw_dev_mtx); 2401 TAILQ_FOREACH(sp, &swtailq, sw_list) 2402 if (sp->sw_id == cp) 2403 sp->sw_id = NULL; 2404 mtx_unlock(&sw_dev_mtx); 2405 } 2406 2407 static void 2408 swapgeom_close_ev(void *arg, int flags) 2409 { 2410 struct g_consumer *cp; 2411 2412 cp = arg; 2413 g_access(cp, -1, -1, 0); 2414 g_detach(cp); 2415 g_destroy_consumer(cp); 2416 } 2417 2418 static void 2419 swapgeom_close(struct thread *td, struct swdevt *sw) 2420 { 2421 2422 /* XXX: direct call when Giant untangled */ 2423 g_waitfor_event(swapgeom_close_ev, sw->sw_id, M_WAITOK, NULL); 2424 } 2425 2426 2427 struct swh0h0 { 2428 struct cdev *dev; 2429 struct vnode *vp; 2430 int error; 2431 }; 2432 2433 static void 2434 swapongeom_ev(void *arg, int flags) 2435 { 2436 struct swh0h0 *swh; 2437 struct g_provider *pp; 2438 struct g_consumer *cp; 2439 static struct g_geom *gp; 2440 struct swdevt *sp; 2441 u_long nblks; 2442 int error; 2443 2444 swh = arg; 2445 swh->error = 0; 2446 pp = g_dev_getprovider(swh->dev); 2447 if (pp == NULL) { 2448 swh->error = ENODEV; 2449 return; 2450 } 2451 mtx_lock(&sw_dev_mtx); 2452 TAILQ_FOREACH(sp, &swtailq, sw_list) { 2453 cp = sp->sw_id; 2454 if (cp != NULL && cp->provider == pp) { 2455 mtx_unlock(&sw_dev_mtx); 2456 swh->error = EBUSY; 2457 return; 2458 } 2459 } 2460 mtx_unlock(&sw_dev_mtx); 2461 if (gp == NULL) 2462 gp = g_new_geomf(&g_swap_class, "swap", NULL); 2463 cp = g_new_consumer(gp); 2464 g_attach(cp, pp); 2465 /* 2466 * XXX: Everytime you think you can improve the margin for 2467 * footshooting, somebody depends on the ability to do so: 2468 * savecore(8) wants to write to our swapdev so we cannot 2469 * set an exclusive count :-( 2470 */ 2471 error = g_access(cp, 1, 1, 0); 2472 if (error) { 2473 g_detach(cp); 2474 g_destroy_consumer(cp); 2475 swh->error = error; 2476 return; 2477 } 2478 nblks = pp->mediasize / DEV_BSIZE; 2479 swaponsomething(swh->vp, cp, nblks, swapgeom_strategy, 2480 swapgeom_close, dev2udev(swh->dev)); 2481 swh->error = 0; 2482 return; 2483 } 2484 2485 static int 2486 swapongeom(struct thread *td, struct vnode *vp) 2487 { 2488 int error; 2489 struct swh0h0 swh; 2490 2491 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 2492 2493 swh.dev = vp->v_rdev; 2494 swh.vp = vp; 2495 swh.error = 0; 2496 /* XXX: direct call when Giant untangled */ 2497 error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL); 2498 if (!error) 2499 error = swh.error; 2500 VOP_UNLOCK(vp, 0, td); 2501 return (error); 2502 } 2503 2504 /* 2505 * VNODE backend 2506 * 2507 * This is used mainly for network filesystem (read: probably only tested 2508 * with NFS) swapfiles. 2509 * 2510 */ 2511 2512 static void 2513 swapdev_strategy(struct buf *bp, struct swdevt *sp) 2514 { 2515 struct vnode *vp2; 2516 2517 bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first); 2518 2519 vp2 = sp->sw_id; 2520 vhold(vp2); 2521 if (bp->b_iocmd == BIO_WRITE) { 2522 if (bp->b_bufobj) 2523 bufobj_wdrop(bp->b_bufobj); 2524 bufobj_wref(&vp2->v_bufobj); 2525 } 2526 if (bp->b_bufobj != &vp2->v_bufobj) 2527 bp->b_bufobj = &vp2->v_bufobj; 2528 bp->b_vp = vp2; 2529 bp->b_iooffset = dbtob(bp->b_blkno); 2530 bstrategy(bp); 2531 return; 2532 } 2533 2534 static void 2535 swapdev_close(struct thread *td, struct swdevt *sp) 2536 { 2537 2538 VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td); 2539 vrele(sp->sw_vp); 2540 } 2541 2542 2543 static int 2544 swaponvp(struct thread *td, struct vnode *vp, u_long nblks) 2545 { 2546 struct swdevt *sp; 2547 int error; 2548 2549 if (nblks == 0) 2550 return (ENXIO); 2551 mtx_lock(&sw_dev_mtx); 2552 TAILQ_FOREACH(sp, &swtailq, sw_list) { 2553 if (sp->sw_id == vp) { 2554 mtx_unlock(&sw_dev_mtx); 2555 return (EBUSY); 2556 } 2557 } 2558 mtx_unlock(&sw_dev_mtx); 2559 2560 (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 2561 #ifdef MAC 2562 error = mac_check_system_swapon(td->td_ucred, vp); 2563 if (error == 0) 2564 #endif 2565 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, -1); 2566 (void) VOP_UNLOCK(vp, 0, td); 2567 if (error) 2568 return (error); 2569 2570 swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close, 2571 NODEV); 2572 return (0); 2573 } 2574