/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/blist.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
 * The 64-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define	MAX_PAGEOUT_CLUSTER	32
#endif

#if !defined(SWB_NPAGES)
#define	SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#endif

#define	SWAP_META_PAGES		PCTRIE_COUNT

/*
 * A swblk structure maps each page index within a
 * SWAP_META_PAGES-aligned and sized range to the address of an
 * on-disk swap block (or SWAPBLK_NONE). The collection of these
 * mappings for an entire vm object is implemented as a pc-trie.
 */
struct swblk {
	vm_pindex_t	p;
	daddr_t		d[SWAP_META_PAGES];
};

static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;		/* Allocate from here next */
static int nswapdev;			/* Number of swap devices */
int swap_pager_avail;
static struct sx swdev_syscall_lock;	/* serialize swap(on|off) */

static u_long swap_reserved;
static u_long swap_total;
static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_reserved, 0, sysctl_page_shift, "A",
    "Amount of swap storage needed to back all allocated anonymous memory.");
SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_total, 0, sysctl_page_shift, "A",
    "Total amount of available swap storage.");

static int overcommit = 0;
SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");
static unsigned long swzone;
SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
    "Actual size of swap metadata zone");
static unsigned long swap_maxpages;
SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
    "Maximum amount of swap supported");

/* bits from overcommit */
#define	SWAP_RESERVE_FORCE_ON		(1 << 0)
#define	SWAP_RESERVE_RLIMIT_ON		(1 << 1)
#define	SWAP_RESERVE_ALLOW_NONWIRED	(1 << 2)
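
/*
 * Sysctl handler for vm.swap_reserved and vm.swap_total: the backing
 * variables are maintained in pages, so shift by PAGE_SHIFT to export
 * the values as byte counts.
 */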
static int
sysctl_page_shift(SYSCTL_HANDLER_ARGS)
{
	uint64_t newval;
	u_long value = *(u_long *)arg1;

	newval = ((uint64_t)value) << PAGE_SHIFT;
	return (sysctl_handle_64(oidp, &newval, 0, req));
}

int
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

int
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	u_long r, s, prev, pincr;
	int res, error;
	static int curfail;
	static struct timeval lastfail;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
	    (uintmax_t)incr));

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(curproc);
		error = racct_add(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
		if (error != 0)
			return (0);
	}
#endif

	pincr = atop(incr);
	res = 0;
	prev = atomic_fetchadd_long(&swap_reserved, pincr);
	r = prev + pincr;
	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
		s = vm_cnt.v_page_count - vm_cnt.v_free_reserved -
		    vm_wire_count();
	} else
		s = 0;
	s += swap_total;
	if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
	    (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
		res = 1;
	} else {
		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
		if (prev < pincr)
			panic("swap_reserved < incr on overcommit fail");
	}
	if (res) {
		prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
		if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
		    prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
		    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT)) {
			res = 0;
			prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
			if (prev < pincr)
				panic("uip->ui_vmsize < incr on overcommit fail");
		}
	}
	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
		    uip->ui_uid, curproc->p_pid, incr);
	}

#ifdef RACCT
	if (racct_enable && !res) {
		PROC_LOCK(curproc);
		racct_sub(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif

	return (res);
}

void
swap_reserve_force(vm_ooffset_t incr)
{
	struct uidinfo *uip;
	u_long pincr;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
	    (uintmax_t)incr));

	PROC_LOCK(curproc);
#ifdef RACCT
	if (racct_enable)
		racct_add_force(curproc, RACCT_SWAP, incr);
#endif
	pincr = atop(incr);
	atomic_add_long(&swap_reserved, pincr);
	uip = curproc->p_ucred->cr_ruidinfo;
	atomic_add_long(&uip->ui_vmsize, pincr);
	PROC_UNLOCK(curproc);
}

void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curproc->p_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	u_long prev, pdecr;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK", __func__,
	    (uintmax_t)decr));

	pdecr = atop(decr);
	prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
	if (prev < pdecr)
		panic("swap_reserved < decr");

	prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
	if (prev < pdecr)
		printf("negative vmsize for uid = %d\n", uip->ui_uid);
#ifdef RACCT
	if (racct_enable)
		racct_sub_cred(cred, RACCT_SWAP, decr);
#endif
}

#define	SWM_POP		0x01	/* pop out */

static int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
static int nsw_wcount_async;	/* limit async write buffers */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
    "Maximum running async swap ops");
static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
    "Swap Fragmentation Info");

static struct sx sw_alloc_sx;

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define	NOBJLISTS	8

#define	NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst swap_pager_object_list[NOBJLISTS];
static uma_zone_t swwbuf_zone;
static uma_zone_t swrbuf_zone;
static uma_zone_t swblk_zone;
static uma_zone_t swpctrie_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t swap_pager_alloc(void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void swap_pager_dealloc(vm_object_t object);
static int swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
    int *);
static int swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, pgo_getpages_iodone_t, void *);
static void swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t swap_pager_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after);
static void swap_pager_init(void);
static void swap_pager_unswapped(vm_page_t);
static void swap_pager_swapoff(struct swdevt *sp);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager */
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	.pgo_getpages =	swap_pager_getpages,	/* pagein */
	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
	.pgo_putpages =	swap_pager_putpages,	/* pageout */
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
};

/*
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
    "Maximum size of a swap block in pages");

static void swp_sizecheck(void);
static void swp_pager_async_iodone(struct buf *bp);
static bool swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
static int swapongeom(struct vnode *);
static int swaponvp(struct thread *, struct vnode *, u_long);
static int swapoff_one(struct swdevt *sp, struct ucred *cred);

/*
 * Swap bitmap functions
 */
static void swp_pager_freeswapspace(daddr_t blk, daddr_t npages);
static daddr_t swp_pager_getswapspace(int *npages, int limit);

/*
 * Metadata functions
 */
static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
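
/*
 * swp_pager_init_freerange() and swp_pager_update_freerange() accumulate a
 * run of contiguous swap blocks that are being freed, so that a single
 * swp_pager_freeswapspace() call can return the whole run to the bitmap.
 * A non-contiguous address flushes the pending run and starts a new one.
 */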
static void
swp_pager_init_freerange(daddr_t *start, daddr_t *num)
{

	*start = SWAPBLK_NONE;
	*num = 0;
}

static void
swp_pager_update_freerange(daddr_t *start, daddr_t *num, daddr_t addr)
{

	if (*start + *num == addr) {
		(*num)++;
	} else {
		swp_pager_freeswapspace(*start, *num);
		*start = addr;
		*num = 1;
	}
}
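
/*
 * Allocation and free callbacks for the pctrie holding an object's swblk
 * structures.  Nodes are allocated M_NOWAIT; the pageout daemon may dip
 * into the zone reserve to guarantee forward progress.
 */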
static void *
swblk_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
	    M_USE_RESERVE : 0)));
}

static void
swblk_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(swpctrie_zone, node);
}

PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);

/*
 * SWP_SIZECHECK() - update swap_pager_full indication
 *
 * update the swap_pager_almost_full indication and warn when we are
 * about to run out of swap space, using lowat/hiwat hysteresis.
 *
 * Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 * No restrictions on call
 * This routine may not block.
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() - initialize the swap pager!
 *
 * Expected to be started from system init.  NOTE:  This code is run
 * before much else so be careful what you depend on.  Most of the VM
 * system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
	sx_init(&sw_alloc_sx, "swspsx");
	sx_init(&swdev_syscall_lock, "swsysc");
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 * Expected to be started from pageout process once, prior to entering
 * its main loop.
 */
void
swap_pager_swap_init(void)
{
	unsigned long n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_init(&swbuf_mtx, "async swbuf mutex", NULL, MTX_DEF);

	swwbuf_zone = pbuf_zsecond_create("swwbuf", nswbuf / 4);
	swrbuf_zone = pbuf_zsecond_create("swrbuf", nswbuf / 2);

	/*
	 * Initialize our zone, taking the user's requested size or
	 * estimating the number we need based on the number of pages
	 * in the system.
	 */
	n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
	    vm_cnt.v_page_count / 2;
	swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
	    pctrie_zone_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	if (swpctrie_zone == NULL)
		panic("failed to create swap pctrie zone.");
	swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
	    NULL, NULL, _Alignof(struct swblk) - 1, UMA_ZONE_VM);
	if (swblk_zone == NULL)
		panic("failed to create swap blk zone.");
	n2 = n;
	do {
		if (uma_zone_reserve_kva(swblk_zone, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	/*
	 * Often uma_zone_reserve_kva() cannot reserve exactly the
	 * requested size.  Account for the difference when
	 * calculating swap_maxpages.
	 */
	n = uma_zone_get_max(swblk_zone);

	if (n < n2)
		printf("Swap blk zone entries changed from %lu to %lu.\n",
		    n2, n);
	swap_maxpages = n * SWAP_META_PAGES;
	swzone = n * sizeof(struct swblk);
	if (!uma_zone_reserve_kva(swpctrie_zone, n))
		printf("Cannot reserve swap pctrie zone, "
		    "reduce kern.maxswzone.\n");
}

static vm_object_t
swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
    vm_ooffset_t offset)
{
	vm_object_t object;

	if (cred != NULL) {
		if (!swap_reserve_by_cred(size, cred))
			return (NULL);
		crhold(cred);
	}

	/*
	 * The un_pager.swp.swp_blks trie is initialized by
	 * vm_object_allocate() to ensure the correct order of
	 * visibility to other threads.
	 */
	object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
	    PAGE_MASK + size));

	object->handle = handle;
	if (cred != NULL) {
		object->cred = cred;
		object->charge = size;
	}
	return (object);
}

/*
 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
 * its metadata structures.
 *
 * This routine is called from the mmap and fork code to create a new
 * OBJT_SWAP object.
 *
 * This routine must ensure that no live duplicate is created for
 * the named object request, which is protected against by
 * holding the sw_alloc_sx lock in case handle != NULL.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (handle != NULL) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			object = swap_pager_alloc_init(handle, cred, size,
			    offset);
			if (object != NULL) {
				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
				    object, pager_object_list);
			}
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = swap_pager_alloc_init(handle, cred, size, offset);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
 *
 * The swap backing for the object is destroyed.  The code is
 * designed such that we can reinstantiate it later, but this
 * routine is typically called only when the entire object is
 * about to be destroyed.
 *
 * The object must be locked.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(object);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
 *
 * Allocate swap for up to the requested number of pages, and at
 * least a minimum number of pages.  The starting swap block number
 * (a page index) is returned or SWAPBLK_NONE if the allocation
 * failed.
 *
 * Also has the side effect of advising that somebody made a mistake
 * when they configured swap and didn't configure enough.
 *
 * This routine may not sleep.
 *
 * We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int *io_npages, int limit)
{
	daddr_t blk;
	struct swdevt *sp;
	int mpages, npages;

	blk = SWAPBLK_NONE;
	npages = mpages = *io_npages;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	while (!TAILQ_EMPTY(&swtailq)) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if ((sp->sw_flags & SW_CLOSING) == 0)
			blk = blist_alloc(sp->sw_blist, &npages, mpages);
		if (blk != SWAPBLK_NONE)
			break;
		sp = TAILQ_NEXT(sp, sw_list);
		if (swdevhd == sp) {
			if (npages <= limit)
				break;
			mpages = npages - 1;
			npages >>= 1;
		}
	}
	if (blk != SWAPBLK_NONE) {
		*io_npages = npages;
		blk += sp->sw_first;
		sp->sw_used += npages;
		swap_pager_avail -= npages;
		swp_sizecheck();
		swdevhd = TAILQ_NEXT(sp, sw_list);
	} else {
		if (swap_pager_full != 2) {
			printf("swp_pager_getswapspace(%d): failed\n",
			    *io_npages);
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
		swdevhd = NULL;
	}
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

static bool
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}
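
/*
 * swp_pager_strategy() - hand a swap buffer to the device that owns the
 * blocks it addresses.  The pages are mapped into the buffer's KVA only
 * when the device cannot handle unmapped I/O.
 */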
static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(bp->b_blkno, sp)) {
			mtx_unlock(&sw_dev_mtx);
			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
			    unmapped_buf_allowed) {
				bp->b_data = unmapped_buf;
				bp->b_offset = 0;
			} else {
				pmap_qenter((vm_offset_t)bp->b_data,
				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
			}
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
 *
 * This routine returns the specified swap blocks back to the bitmap.
 *
 * This routine may not sleep.
 */
static void
swp_pager_freeswapspace(daddr_t blk, daddr_t npages)
{
	struct swdevt *sp;

	if (npages == 0)
		return;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(blk, sp)) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SYSCTL_SWAP_FRAGMENTATION() - produce raw swap space stats
 */
static int
sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct swdevt *sp;
	const char *devname;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (vn_isdisk(sp->sw_vp, NULL))
			devname = devtoname(sp->sw_vp->v_rdev);
		else
			devname = "[file]";
		sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
		blist_stats(sp->sw_blist, &sbuf);
	}
	mtx_unlock(&sw_dev_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
 *			    range within an object.
 *
 * This is a globally accessible routine.
 *
 * This routine removes swapblk assignments from swap metadata.
 *
 * The external callers of this routine typically have already destroyed
 * or renamed vm_page_t's associated with this range in the object so
 * we should be ok.
 *
 * The object must be locked.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{

	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 * Assigns swap blocks to the specified range within the object.  The
 * swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 * Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	daddr_t addr, blk, n_free, s_free;
	int i, j, n;

	swp_pager_init_freerange(&s_free, &n_free);
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += n) {
		n = min(BLIST_MAX_ALLOC, size - i);
		blk = swp_pager_getswapspace(&n, 1);
		if (blk == SWAPBLK_NONE) {
			swp_pager_meta_free(object, start, i);
			VM_OBJECT_WUNLOCK(object);
			return (-1);
		}
		for (j = 0; j < n; ++j) {
			addr = swp_pager_meta_build(object,
			    start + i + j, blk + j);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&s_free, &n_free,
				    addr);
		}
	}
	swp_pager_freeswapspace(s_free, n_free);
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
 *		       and destroy the source.
 *
 * Copy any valid swapblks from the source to the destination.  In
 * cases where both the source and destination have a valid swapblk,
 * we keep the destination's.
 *
 * This routine is allowed to sleep.  It may sleep allocating metadata
 * indirectly through swp_pager_meta_build() or if paging is still in
 * progress on the source.
 *
 * The source object contains no vm_page_t's (which is just as well)
 *
 * The source object is of type OBJT_SWAP.
 *
 * The source and destination objects must be locked.
 * Both object locks may temporarily be released.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;
	daddr_t dstaddr, n_free, s_free, srcaddr;

	VM_OBJECT_ASSERT_WLOCKED(srcobject);
	VM_OBJECT_ASSERT_WLOCKED(dstobject);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource && srcobject->handle != NULL) {
		vm_object_pip_add(srcobject, 1);
		VM_OBJECT_WUNLOCK(srcobject);
		vm_object_pip_add(dstobject, 1);
		VM_OBJECT_WUNLOCK(dstobject);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(dstobject);
		vm_object_pip_wakeup(dstobject);
		VM_OBJECT_WLOCK(srcobject);
		vm_object_pip_wakeup(srcobject);
	}

	/*
	 * Transfer source to destination.
	 */
	swp_pager_init_freerange(&s_free, &n_free);
	for (i = 0; i < dstobject->size; ++i) {
		srcaddr = swp_pager_meta_ctl(srcobject, i + offset, SWM_POP);
		if (srcaddr == SWAPBLK_NONE)
			continue;
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
		if (dstaddr != SWAPBLK_NONE) {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */
			swp_pager_update_freerange(&s_free, &n_free, srcaddr);
			continue;
		}

		/*
		 * Destination has no swapblk and is not resident,
		 * copy source.
		 *
		 * swp_pager_meta_build() can sleep.
		 */
		vm_object_pip_add(srcobject, 1);
		VM_OBJECT_WUNLOCK(srcobject);
		vm_object_pip_add(dstobject, 1);
		dstaddr = swp_pager_meta_build(dstobject, i, srcaddr);
		KASSERT(dstaddr == SWAPBLK_NONE,
		    ("Unexpected destination swapblk"));
		vm_object_pip_wakeup(dstobject);
		VM_OBJECT_WLOCK(srcobject);
		vm_object_pip_wakeup(srcobject);
	}
	swp_pager_freeswapspace(s_free, n_free);

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
 *			  the requested page.
 *
 * We determine whether good backing store exists for the requested
 * page and return TRUE if it does, FALSE if it doesn't.
 *
 * If TRUE, we also try to determine how much valid, contiguous backing
 * store exists before and after the requested page.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk, blk0;
	int i;

	VM_OBJECT_ASSERT_LOCKED(object);

	/*
	 * do we have good backing store at the requested index ?
	 */
	blk0 = swp_pager_meta_ctl(object, pindex, 0);
	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = i - 1;
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = i - 1;
	}
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 * This removes any associated swap backing store, whether valid or
 * not, from the page.
 *
 * This routine is typically called when a page is made dirty, at
 * which point any associated swap can be freed.  MADV_FREE also
 * calls us in a special-case situation
 *
 * NOTE!!!  If the page is clean and the swap was valid, the caller
 * should make the page dirty before calling this routine.  This routine
 * does NOT change the m->dirty status of the page.  Also: MADV_FREE
 * depends on it.
 *
 * This routine may not sleep.
 *
 * The object containing the page must be locked.
 */
static void
swap_pager_unswapped(vm_page_t m)
{
	daddr_t srcaddr;

	srcaddr = swp_pager_meta_ctl(m->object, m->pindex, SWM_POP);
	if (srcaddr != SWAPBLK_NONE)
		swp_pager_freeswapspace(srcaddr, 1);
}

/*
 * swap_pager_getpages() - bring pages in from swap
 *
 * Attempt to page in the pages in array "ma" of length "count".  The
 * caller may optionally specify that additional pages preceding and
 * succeeding the specified range be paged in.  The number of such pages
 * is returned in the "rbehind" and "rahead" parameters, and they will
 * be in the inactive queue upon return.
 *
 * The pages in "ma" must be busied and will remain busied upon return.
 */
static int
swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{
	struct buf *bp;
	vm_page_t bm, mpred, msucc, p;
	vm_pindex_t pindex;
	daddr_t blk;
	int i, maxahead, maxbehind, reqcount;

	reqcount = count;

	/*
	 * Determine the final number of read-behind pages and
	 * allocate them BEFORE releasing the object lock.  Otherwise,
	 * there can be a problematic race with vm_object_split().
	 * Specifically, vm_object_split() might first transfer pages
	 * that precede ma[0] in the current object to a new object,
	 * and then this function incorrectly recreates those pages as
	 * read-behind pages in the current object.
	 */
	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
		return (VM_PAGER_FAIL);

	/*
	 * Clip the readahead and readbehind ranges to exclude resident pages.
	 */
	if (rahead != NULL) {
		KASSERT(reqcount - 1 <= maxahead,
		    ("page count %d extends beyond swap block", reqcount));
		*rahead = imin(*rahead, maxahead - (reqcount - 1));
		pindex = ma[reqcount - 1]->pindex;
		msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
		if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
			*rahead = msucc->pindex - pindex - 1;
	}
	if (rbehind != NULL) {
		*rbehind = imin(*rbehind, maxbehind);
		pindex = ma[0]->pindex;
		mpred = TAILQ_PREV(ma[0], pglist, listq);
		if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
			*rbehind = pindex - mpred->pindex - 1;
	}

	bm = ma[0];
	for (i = 0; i < count; i++)
		ma[i]->oflags |= VPO_SWAPINPROG;

	/*
	 * Allocate readahead and readbehind pages.
	 */
	if (rbehind != NULL) {
		for (i = 1; i <= *rbehind; i++) {
			p = vm_page_alloc(object, ma[0]->pindex - i,
			    VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			p->oflags |= VPO_SWAPINPROG;
			bm = p;
		}
		*rbehind = i - 1;
	}
	if (rahead != NULL) {
		for (i = 0; i < *rahead; i++) {
			p = vm_page_alloc(object,
			    ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			p->oflags |= VPO_SWAPINPROG;
		}
		*rahead = i;
	}
	if (rbehind != NULL)
		count += *rbehind;
	if (rahead != NULL)
		count += *rahead;

	vm_object_pip_add(object, count);

	pindex = bm->pindex;
	blk = swp_pager_meta_ctl(object, pindex, 0);
	KASSERT(blk != SWAPBLK_NONE,
	    ("no swap blocking containing %p(%jx)", object, (uintmax_t)pindex));

	VM_OBJECT_WUNLOCK(object);
	bp = uma_zalloc(swrbuf_zone, M_WAITOK);
	/* Pages cannot leave the object while busy. */
	for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
		MPASS(p->pindex == bm->pindex + i);
		bp->b_pages[i] = p;
	}

	bp->b_flags |= B_PAGING;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_blkno = blk;
	bp->b_bcount = PAGE_SIZE * count;
	bp->b_bufsize = PAGE_SIZE * count;
	bp->b_npages = count;
	bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
	bp->b_pgafter = rahead != NULL ? *rahead : 0;

	VM_CNT_INC(v_swapin);
	VM_CNT_ADD(v_swappgsin, count);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our ma[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
	 */
	BUF_KERNPROC(bp);
	swp_pager_strategy(bp);

	/*
	 * Wait for the pages we want to complete.  VPO_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the metadata for each page in the request.
	 */
	VM_OBJECT_WLOCK(object);
	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
		ma[0]->oflags |= VPO_SWAPSLEEP;
		VM_CNT_INC(v_intrans);
		if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
		    "swread", hz * 20)) {
			printf(
    "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
		}
	}

	/*
	 * If we had an unrecoverable read error pages will not be valid.
	 */
	for (i = 0; i < reqcount; i++)
		if (ma[i]->valid != VM_PAGE_BITS_ALL)
			return (VM_PAGER_ERROR);

	return (VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_getpages_async():
 *
 * Right now this is emulation of asynchronous operation on top of
 * swap_pager_getpages().
 */
static int
swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{
	int r, error;

	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
	VM_OBJECT_WUNLOCK(object);
	switch (r) {
	case VM_PAGER_OK:
		error = 0;
		break;
	case VM_PAGER_ERROR:
		error = EIO;
		break;
	case VM_PAGER_FAIL:
		error = EINVAL;
		break;
	default:
		panic("unhandled swap_pager_getpages() error %d", r);
	}
	(iodone)(arg, ma, count, error);
	VM_OBJECT_WLOCK(object);

	return (r);
}

/*
 * swap_pager_putpages:
 *
 * Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 * We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 * are automatically converted to SWAP objects.
 *
 * In a low memory situation we may block in VOP_STRATEGY(), but the new
 * vm_page reservation system coupled with properly written VFS devices
 * should ensure that no low-memory deadlock occurs.  This is an area
 * which needs work.
 *
 * The parent has N vm_object_pip_add() references prior to
 * calling us and will remove references for rtvals[] that are
 * not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 * completion.
 *
 * The parent has soft-busy'd the pages it passes us and will unbusy
 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 * We need to unbusy the rest on I/O completion.
 */
static void
swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
    int flags, int *rtvals)
{
	int i, n;
	boolean_t sync;
	daddr_t addr, n_free, s_free;

	swp_pager_init_freerange(&s_free, &n_free);
	if (count && ma[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    ma[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type != OBJT_SWAP) {
		addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		KASSERT(addr == SWAPBLK_NONE,
		    ("unexpected object swap block"));
	}
	VM_OBJECT_WUNLOCK(object);

	n = 0;
	if (curproc != pageproc)
		sync = TRUE;
	else
		sync = (flags & VM_PAGER_PUT_SYNC) != 0;

	/*
	 * Step 2
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		/* Get a block of swap of size up to size n. */
		blk = swp_pager_getswapspace(&n, 4);
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			continue;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync != TRUE) {
			mtx_lock(&swbuf_mtx);
			while (nsw_wcount_async == 0)
				msleep(&nsw_wcount_async, &swbuf_mtx, PVM,
				    "swbufa", 0);
			nsw_wcount_async--;
			mtx_unlock(&swbuf_mtx);
		}
		bp = uma_zalloc(swwbuf_zone, M_WAITOK);
		if (sync != TRUE)
			bp->b_flags = B_ASYNC;
		bp->b_flags |= B_PAGING;
		bp->b_iocmd = BIO_WRITE;

		bp->b_rcred = crhold(thread0.td_ucred);
		bp->b_wcred = crhold(thread0.td_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		VM_OBJECT_WLOCK(object);
		for (j = 0; j < n; ++j) {
			vm_page_t mreq = ma[i+j];

			addr = swp_pager_meta_build(mreq->object, mreq->pindex,
			    blk + j);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&s_free, &n_free,
				    addr);
			MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
			mreq->oflags |= VPO_SWAPINPROG;
			bp->b_pages[j] = mreq;
		}
		VM_OBJECT_WUNLOCK(object);
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		VM_CNT_INC(v_swapout);
		VM_CNT_ADD(v_swappgsout, bp->b_npages);

		/*
		 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
		 * can call the async completion routine at the end of a
		 * synchronous I/O operation.  Otherwise, our caller would
		 * perform duplicate unbusy and wakeup operations on the page
		 * and object, respectively.
		 */
		for (j = 0; j < n; j++)
			rtvals[i + j] = VM_PAGER_PEND;

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swp_pager_strategy(bp);
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		bp->b_iodone = bdone;
		swp_pager_strategy(bp);

		/*
		 * Wait for the sync I/O to complete.
		 */
		bwait(bp, PVM, "swwrt");

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
	}
	VM_OBJECT_WLOCK(object);
	swp_pager_freeswapspace(s_free, n_free);
}

/*
 * swp_pager_async_iodone:
 *
 * Completion routine for asynchronous reads and writes from/to swap.
 * Also called manually by synchronous code to finish up a bp.
 *
 * This routine may not sleep.
 */
static void
swp_pager_async_iodone(struct buf *bp)
{
	int i;
	vm_object_t object = NULL;

	/*
	 * Report error - unless we ran out of memory, in which case
	 * we've already logged it in swapgeom_strategy().
	 */
	if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld,"
		    "size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * remove the mapping for kernel virtual
	 */
	if (buf_mapped(bp))
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
	else
		bp->b_data = bp->b_kvabase;

	if (bp->b_npages) {
		object = bp->b_pages[0]->object;
		VM_OBJECT_WLOCK(object);
	}

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		m->oflags &= ~VPO_SWAPINPROG;
		if (m->oflags & VPO_SWAPSLEEP) {
			m->oflags &= ~VPO_SWAPSLEEP;
			wakeup(&object->paging_in_progress);
		}

		if (bp->b_ioflags & BIO_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bp->b_iocmd == BIO_READ) {
				/*
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 */
				m->valid = 0;
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				MPASS(m->dirty == VM_PAGE_BITS_ALL);
				vm_page_lock(m);
				vm_page_activate(m);
				vm_page_unlock(m);
				vm_page_sunbusy(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 */
			KASSERT(!pmap_page_is_mapped(m),
			    ("swp_pager_async_iodone: page %p is mapped", m));
			KASSERT(m->dirty == 0,
			    ("swp_pager_async_iodone: page %p is dirty", m));

			m->valid = VM_PAGE_BITS_ALL;
			if (i < bp->b_pgbefore ||
			    i >= bp->b_npages - bp->b_pgafter)
				vm_page_readahead_finish(m);
		} else {
			/*
			 * For write success, clear the dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 * A page is only written to swap after a period of
			 * inactivity.  Therefore, we do not expect it to be
			 * reused.
			 */
			KASSERT(!pmap_page_is_write_mapped(m),
			    ("swp_pager_async_iodone: page %p is not write"
			    " protected", m));
			vm_page_undirty(m);
			vm_page_lock(m);
			vm_page_deactivate_noreuse(m);
			vm_page_unlock(m);
			vm_page_sunbusy(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object != NULL) {
		vm_object_pip_wakeupn(object, bp->b_npages);
		VM_OBJECT_WUNLOCK(object);
	}

	/*
	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
	 * bstrategy().  Set them back to NULL now we're done with it, or we'll
	 * trigger a KASSERT in relpbuf().
	 */
	if (bp->b_vp) {
		bp->b_vp = NULL;
		bp->b_bufobj = NULL;
	}
	/*
	 * release the physical I/O buffer
	 */
	if (bp->b_flags & B_ASYNC) {
		mtx_lock(&swbuf_mtx);
		if (++nsw_wcount_async == 1)
			wakeup(&nsw_wcount_async);
		mtx_unlock(&swbuf_mtx);
	}
	uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp);
}

int
swap_pager_nswapdev(void)
{

	return (nswapdev);
}
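
/*
 * Helpers for swapoff: mark a page dirty, since it is losing its backing
 * store, and unbusy it.  The launder variant also places the page in the
 * laundry queue, as it was just paged in and is unlikely to be active.
 */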
static void
swp_pager_force_dirty(vm_page_t m)
{

	vm_page_dirty(m);
#ifdef INVARIANTS
	vm_page_lock(m);
	if (!vm_page_wired(m) && m->queue == PQ_NONE)
		panic("page %p is neither wired nor queued", m);
	vm_page_unlock(m);
#endif
	vm_page_xunbusy(m);
}

static void
swp_pager_force_launder(vm_page_t m)
{

	vm_page_dirty(m);
	vm_page_lock(m);
	vm_page_launder(m);
	vm_page_unlock(m);
	vm_page_xunbusy(m);
}

/*
 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
 *
 * This routine dissociates the page at the given index within an object
 * from its backing store, paging it in if it does not reside in memory.
 * If the page is paged in, it is marked dirty and placed in the laundry
 * queue.  The page is marked dirty because it no longer has backing
 * store.  It is placed in the laundry queue because it has not been
 * accessed recently.  Otherwise, it would already reside in memory.
 *
 * We also attempt to swap in all other pages in the swap block.
 * However, we only guarantee that the one at the specified index is
 * paged in.
 *
 * XXX - The code to page the whole block in doesn't work, so we
 *	 revert to the one-by-one behavior for now.  Sigh.
 */
static void
swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	vm_object_pip_add(object, 1);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
	if (m->valid == VM_PAGE_BITS_ALL) {
		vm_object_pip_wakeup(object);
		swp_pager_force_dirty(m);
		vm_pager_page_unswapped(m);
		return;
	}

	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
	vm_object_pip_wakeup(object);
	swp_pager_force_launder(m);
	vm_pager_page_unswapped(m);
}

/*
 * swap_pager_swapoff_object:
 *
 * Page in all of the pages that have been paged out for an object
 * from a given swap device.
 */
static void
swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
{
	struct swblk *sb;
	vm_pindex_t pi;
	int i;

	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
		pi = sb->p + SWAP_META_PAGES;
		for (i = 0; i < SWAP_META_PAGES; i++) {
			if (sb->d[i] == SWAPBLK_NONE)
				continue;
			if (swp_pager_isondev(sb->d[i], sp))
				swp_pager_force_pagein(object, sb->p + i);
		}
	}
}

/*
 * swap_pager_swapoff:
 *
 * Page in all of the pages that have been paged out to the
 * given device.  The corresponding blocks in the bitmap must be
 * marked as allocated and the device must be flagged SW_CLOSING.
 * There may be no processes swapped out to the device.
 *
 * This routine may block.
 */
static void
swap_pager_swapoff(struct swdevt *sp)
{
	vm_object_t object;
	int retries;

	sx_assert(&swdev_syscall_lock, SA_XLOCKED);

	retries = 0;
full_rescan:
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->type != OBJT_SWAP)
			continue;
		mtx_unlock(&vm_object_list_mtx);
		/* Depends on type-stability. */
		VM_OBJECT_WLOCK(object);

		/*
		 * Dead objects are eventually terminated on their own.
		 */
		if ((object->flags & OBJ_DEAD) != 0)
			goto next_obj;

		/*
		 * Sync with fences placed after pctrie
		 * initialization.  We must not access pctrie below
		 * unless we checked that our object is swap and not
		 * dead.
		 */
		atomic_thread_fence_acq();
		if (object->type != OBJT_SWAP)
			goto next_obj;

		swap_pager_swapoff_object(sp, object);
next_obj:
		VM_OBJECT_WUNLOCK(object);
		mtx_lock(&vm_object_list_mtx);
	}
	mtx_unlock(&vm_object_list_mtx);

	if (sp->sw_used) {
		/*
		 * Objects may be locked or paging to the device being
		 * removed, so we will miss their pages and need to
		 * make another pass.  We have marked this device as
		 * SW_CLOSING, so the activity should finish soon.
		 */
		retries++;
		if (retries > 100) {
			panic("swapoff: failed to locate %d swap blocks",
			    sp->sw_used);
		}
		pause("swpoff", hz / 20);
		goto full_rescan;
	}
	EVENTHANDLER_INVOKE(swapoff, sp);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 * These routines manipulate the swap metadata stored in the
 * OBJT_SWAP object.
 *
 * Swap metadata is implemented with a global hash and not directly
 * linked into the object.  Instead the object simply contains
 * appropriate tracking counters.
 */

/*
 * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
 */
static bool
swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
{
	int i;

	MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
	for (i = start; i < limit; i++) {
		if (sb->d[i] != SWAPBLK_NONE)
			return (false);
	}
	return (true);
}

/*
 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
 *
 * We first convert the object to a swap object if it is a default
 * object.
 *
 * The specified swapblk is added to the object's swap metadata.  If
 * the swapblk is not valid, it is freed instead.  Any previously
 * assigned swapblk is returned.
 */
static daddr_t
swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
{
	static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
	struct swblk *sb, *sb1;
	vm_pindex_t modpi, rdpi;
	daddr_t prev_swapblk;
	int error, i;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Convert default object to swap object if necessary
	 */
	if (object->type != OBJT_SWAP) {
		pctrie_init(&object->un_pager.swp.swp_blks);

		/*
		 * Ensure that swap_pager_swapoff()'s iteration over
		 * object_list does not see a garbage pctrie.
		 */
		atomic_thread_fence_rel();

		object->type = OBJT_SWAP;
		KASSERT(object->handle == NULL, ("default pager with handle"));
	}

	rdpi = rounddown(pindex, SWAP_META_PAGES);
	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
	if (sb == NULL) {
		if (swapblk == SWAPBLK_NONE)
			return (SWAPBLK_NONE);
		for (;;) {
			sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
			    pageproc ? M_USE_RESERVE : 0));
			if (sb != NULL) {
				sb->p = rdpi;
				for (i = 0; i < SWAP_META_PAGES; i++)
					sb->d[i] = SWAPBLK_NONE;
				if (atomic_cmpset_int(&swblk_zone_exhausted,
				    1, 0))
					printf("swblk zone ok\n");
				break;
			}
			VM_OBJECT_WUNLOCK(object);
			if (uma_zone_exhausted(swblk_zone)) {
				if (atomic_cmpset_int(&swblk_zone_exhausted,
				    0, 1))
					printf("swap blk zone exhausted, "
					    "increase kern.maxswzone\n");
				vm_pageout_oom(VM_OOM_SWAPZ);
				pause("swzonxb", 10);
			} else
				uma_zwait(swblk_zone);
			VM_OBJECT_WLOCK(object);
			sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
			    rdpi);
			if (sb != NULL)
				/*
				 * Somebody swapped out a nearby page,
				 * allocating swblk at the rdpi index,
				 * while we dropped the object lock.
				 */
				goto allocated;
		}
		for (;;) {
			error = SWAP_PCTRIE_INSERT(
			    &object->un_pager.swp.swp_blks, sb);
			if (error == 0) {
				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
				    1, 0))
					printf("swpctrie zone ok\n");
				break;
			}
			VM_OBJECT_WUNLOCK(object);
			if (uma_zone_exhausted(swpctrie_zone)) {
				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
				    0, 1))
					printf("swap pctrie zone exhausted, "
					    "increase kern.maxswzone\n");
				vm_pageout_oom(VM_OOM_SWAPZ);
				pause("swzonxp", 10);
			} else
				uma_zwait(swpctrie_zone);
			VM_OBJECT_WLOCK(object);
			sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
			    rdpi);
			if (sb1 != NULL) {
				uma_zfree(swblk_zone, sb);
				sb = sb1;
				goto allocated;
			}
		}
	}
allocated:
	MPASS(sb->p == rdpi);

	modpi = pindex % SWAP_META_PAGES;
	/* Return prior contents of metadata. */
	prev_swapblk = sb->d[modpi];
	/* Enter block into metadata. */
	sb->d[modpi] = swapblk;

	/*
	 * Free the swblk if we end up with an empty page run.
	 */
	if (swapblk == SWAPBLK_NONE &&
	    swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, rdpi);
		uma_zfree(swblk_zone, sb);
	}
	return (prev_swapblk);
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
{
	struct swblk *sb;
	daddr_t n_free, s_free;
	vm_pindex_t last;
	int i, limit, start;

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type != OBJT_SWAP || count == 0)
		return;

	swp_pager_init_freerange(&s_free, &n_free);
	last = pindex + count;
	for (;;) {
		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
		    rounddown(pindex, SWAP_META_PAGES));
		if (sb == NULL || sb->p >= last)
			break;
		start = pindex > sb->p ? pindex - sb->p : 0;
		limit = last - sb->p < SWAP_META_PAGES ? last - sb->p :
		    SWAP_META_PAGES;
		for (i = start; i < limit; i++) {
			if (sb->d[i] == SWAPBLK_NONE)
				continue;
			swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
			sb->d[i] = SWAPBLK_NONE;
		}
		pindex = sb->p + SWAP_META_PAGES;
		if (swp_pager_swblk_empty(sb, 0, start) &&
		    swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
			SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
			    sb->p);
			uma_zfree(swblk_zone, sb);
		}
	}
	swp_pager_freeswapspace(s_free, n_free);
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
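 *
 *	Each pass of the loop below looks up the least swblk at or after
 *	pindex, collects every block it maps for release to the swap
 *	bitmap, frees the swblk back to its zone, and advances pindex
 *	past the swblk, so each one is visited exactly once.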
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblk *sb;
	daddr_t n_free, s_free;
	vm_pindex_t pindex;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (object->type != OBJT_SWAP)
		return;

	swp_pager_init_freerange(&s_free, &n_free);
	for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
	    &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
		pindex = sb->p + SWAP_META_PAGES;
		for (i = 0; i < SWAP_META_PAGES; i++) {
			if (sb->d[i] == SWAPBLK_NONE)
				continue;
			swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
		}
		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
		uma_zfree(swblk_zone, sb);
	}
	swp_pager_freeswapspace(s_free, n_free);
}

/*
 * SWP_PAGER_META_CTL() - misc control of swap metadata.
 *
 *	This routine is capable of looking up or removing swapblk
 *	assignments in the swap metadata.  It returns the swapblk being
 *	looked up or popped, or SWAPBLK_NONE if the block was invalid.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_POP		remove the block from the metadata, but do not
 *			free the underlying swap space
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
{
	struct swblk *sb;
	daddr_t r1;

	if ((flags & SWM_POP) != 0)
		VM_OBJECT_ASSERT_WLOCKED(object);
	else
		VM_OBJECT_ASSERT_LOCKED(object);

	/*
	 * The metadata only exists if the object is OBJT_SWAP,
	 * and even then it might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP)
		return (SWAPBLK_NONE);

	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
	    rounddown(pindex, SWAP_META_PAGES));
	if (sb == NULL)
		return (SWAPBLK_NONE);
	r1 = sb->d[pindex % SWAP_META_PAGES];
	if (r1 == SWAPBLK_NONE)
		return (SWAPBLK_NONE);
	if ((flags & SWM_POP) != 0) {
		sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
		if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
			SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
			    rounddown(pindex, SWAP_META_PAGES));
			uma_zfree(swblk_zone, sb);
		}
	}
	return (r1);
}

/*
 * Returns the least page index which is greater than or equal to the
 * parameter pindex and for which there is a swap block allocated.
 * Returns object's size if the object's type is not swap or if there
 * are no allocated swap blocks for the object after the requested
 * pindex.
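 *
 * A worked example (hypothetical layout; SWAP_META_PAGES is assumed to
 * be 32 here purely for illustration): if the object's only swap block
 * backs page index 40, a query with pindex 5 finds the swblk covering
 * pages 32..63 and returns 40, while a query with pindex 41 scans past
 * that block and returns object->size.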
 */
vm_pindex_t
swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
{
	struct swblk *sb;
	int i;

	VM_OBJECT_ASSERT_LOCKED(object);
	if (object->type != OBJT_SWAP)
		return (object->size);

	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
	    rounddown(pindex, SWAP_META_PAGES));
	if (sb == NULL)
		return (object->size);
	if (sb->p < pindex) {
		for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
			if (sb->d[i] != SWAPBLK_NONE)
				return (sb->p + i);
		}
		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
		    roundup(pindex, SWAP_META_PAGES));
		if (sb == NULL)
			return (object->size);
	}
	for (i = 0; i < SWAP_META_PAGES; i++) {
		if (sb->d[i] != SWAPBLK_NONE)
			return (sb->p + i);
	}

	/*
	 * We get here if a swblk is present in the trie but it
	 * doesn't map any blocks.
	 */
	MPASS(0);
	return (object->size);
}

/*
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapon_args {
	char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_swapon(struct thread *td, struct swapon_args *uap)
{
	struct vattr attr;
	struct vnode *vp;
	struct nameidata nd;
	int error;

	error = priv_check(td, PRIV_SWAPON);
	if (error)
		return (error);

	sx_xlock(&swdev_syscall_lock);

	/*
	 * Swap metadata may not fit in the KVM if we have physical
	 * memory of >1GB.
	 */
	if (swblk_zone == NULL) {
		error = ENOMEM;
		goto done;
	}

	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
	    uap->name, td);
	error = namei(&nd);
	if (error)
		goto done;

	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	if (vn_isdisk(vp, &error)) {
		error = swapongeom(vp);
	} else if (vp->v_type == VREG &&
	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}

	if (error)
		vrele(vp);
done:
	sx_xunlock(&swdev_syscall_lock);
	return (error);
}

/*
 * Check that the total amount of swap currently configured does not
 * exceed half the theoretical maximum.  If it does, print a warning
 * message.
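 *
 * For example (hypothetical numbers): with a swblk zone limit of 1000
 * items and SWAP_META_PAGES == 32, the theoretical maximum is 32000
 * pages of swap, and the warning below fires once more than 16000
 * pages are configured.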
 */
static void
swapon_check_swzone(void)
{
	unsigned long maxpages, npages;

	npages = swap_total;
	/* absolute maximum we can handle assuming 100% efficiency */
	maxpages = uma_zone_get_max(swblk_zone) * SWAP_META_PAGES;

	/* recommend using no more than half that amount */
	if (npages > maxpages / 2) {
		printf("warning: total configured swap (%lu pages) "
		    "exceeds maximum recommended amount (%lu pages).\n",
		    npages, maxpages / 2);
		printf("warning: increase kern.maxswzone "
		    "or reduce amount of swap.\n");
	}
}

static void
swaponsomething(struct vnode *vp, void *id, u_long nblks,
    sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
{
	struct swdevt *sp, *tsp;
	swblk_t dvbase;
	u_long mblocks;

	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sp->sw_nblks is in page-sized chunks now too.
	 */
	nblks &= ~(ctodb(1) - 1);
	nblks = dbtoc(nblks);

	/*
	 * If we go beyond this, we get overflows in the radix
	 * tree bitmap code.
	 */
	mblocks = 0x40000000 / BLIST_META_RADIX;
	if (nblks > mblocks) {
		printf(
		    "WARNING: reducing swap size to maximum of %luMB per unit\n",
		    mblocks / 1024 / 1024 * PAGE_SIZE);
		nblks = mblocks;
	}

	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
	sp->sw_vp = vp;
	sp->sw_id = id;
	sp->sw_dev = dev;
	sp->sw_nblks = nblks;
	sp->sw_used = 0;
	sp->sw_strategy = strategy;
	sp->sw_close = close;
	sp->sw_flags = flags;

	sp->sw_blist = blist_create(nblks, M_WAITOK);
	/*
	 * Do not free the first two blocks in order to avoid overwriting
	 * any bsd label at the front of the partition.
	 */
	blist_free(sp->sw_blist, 2, nblks - 2);

	dvbase = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
		if (tsp->sw_end >= dvbase) {
			/*
			 * We put one uncovered page between the devices
			 * in order to definitively prevent any cross-device
			 * I/O requests.
			 */
			dvbase = tsp->sw_end + 1;
		}
	}
	sp->sw_first = dvbase;
	sp->sw_end = dvbase + nblks;
	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
	nswapdev++;
	swap_pager_avail += nblks - 2;
	swap_total += nblks;
	swapon_check_swzone();
	swp_sizecheck();
	mtx_unlock(&sw_dev_mtx);
	EVENTHANDLER_INVOKE(swapon, sp);
}

/*
 * SYSCALL: swapoff(devname)
 *
 * Disable swapping on the given device.
 *
 * XXX: Badly designed system call: it should use a device index
 * rather than filename as specification.  We keep sw_vp around
 * only to make this work.
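 *
 * In practice this is reached through swapoff(8) with a path naming
 * the configured swap device, e.g. "/dev/ada0p3" (device name purely
 * illustrative).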
 */
#ifndef _SYS_SYSPROTO_H_
struct swapoff_args {
	char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_swapoff(struct thread *td, struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nameidata nd;
	struct swdevt *sp;
	int error;

	error = priv_check(td, PRIV_SWAPOFF);
	if (error)
		return (error);

	sx_xlock(&swdev_syscall_lock);

	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
	    td);
	error = namei(&nd);
	if (error)
		goto done;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_vp == vp)
			break;
	}
	mtx_unlock(&sw_dev_mtx);
	if (sp == NULL) {
		error = EINVAL;
		goto done;
	}
	error = swapoff_one(sp, td->td_ucred);
done:
	sx_xunlock(&swdev_syscall_lock);
	return (error);
}

static int
swapoff_one(struct swdevt *sp, struct ucred *cred)
{
	u_long nblks;
#ifdef MAC
	int error;
#endif

	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
#ifdef MAC
	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_system_check_swapoff(cred, sp->sw_vp);
	(void) VOP_UNLOCK(sp->sw_vp, 0);
	if (error != 0)
		return (error);
#endif
	nblks = sp->sw_nblks;

	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
	if (vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
		return (ENOMEM);

	/*
	 * Prevent further allocations on this device.
	 */
	mtx_lock(&sw_dev_mtx);
	sp->sw_flags |= SW_CLOSING;
	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
	swap_total -= nblks;
	mtx_unlock(&sw_dev_mtx);

	/*
	 * Page in the contents of the device and close it.
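	 * swap_pager_swapoff() keeps rescanning until sp->sw_used drains
	 * to zero, so no swap blocks on this device remain allocated by
	 * the time sw_close() is called.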
	 */
	swap_pager_swapoff(sp);

	sp->sw_close(curthread, sp);
	mtx_lock(&sw_dev_mtx);
	sp->sw_id = NULL;
	TAILQ_REMOVE(&swtailq, sp, sw_list);
	nswapdev--;
	if (nswapdev == 0) {
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	if (swdevhd == sp)
		swdevhd = NULL;
	mtx_unlock(&sw_dev_mtx);
	blist_destroy(sp->sw_blist);
	free(sp, M_VMPGDATA);
	return (0);
}

void
swapoff_all(void)
{
	struct swdevt *sp, *spt;
	const char *devname;
	int error;

	sx_xlock(&swdev_syscall_lock);

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
		mtx_unlock(&sw_dev_mtx);
		if (vn_isdisk(sp->sw_vp, NULL))
			devname = devtoname(sp->sw_vp->v_rdev);
		else
			devname = "[file]";
		error = swapoff_one(sp, thread0.td_ucred);
		if (error != 0) {
			printf("Cannot remove swap device %s (error=%d), "
			    "skipping.\n", devname, error);
		} else if (bootverbose) {
			printf("Swap device %s removed.\n", devname);
		}
		mtx_lock(&sw_dev_mtx);
	}
	mtx_unlock(&sw_dev_mtx);

	sx_xunlock(&swdev_syscall_lock);
}

void
swap_pager_status(int *total, int *used)
{
	struct swdevt *sp;

	*total = 0;
	*used = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		*total += sp->sw_nblks;
		*used += sp->sw_used;
	}
	mtx_unlock(&sw_dev_mtx);
}

int
swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
{
	struct swdevt *sp;
	const char *tmp_devname;
	int error, n;

	n = 0;
	error = ENOENT;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (n != name) {
			n++;
			continue;
		}
		xs->xsw_version = XSWDEV_VERSION;
		xs->xsw_dev = sp->sw_dev;
		xs->xsw_flags = sp->sw_flags;
		xs->xsw_nblks = sp->sw_nblks;
		xs->xsw_used = sp->sw_used;
		if (devname != NULL) {
			if (vn_isdisk(sp->sw_vp, NULL))
				tmp_devname = devtoname(sp->sw_vp->v_rdev);
			else
				tmp_devname = "[file]";
			strncpy(devname, tmp_devname, len);
		}
		error = 0;
		break;
	}
	mtx_unlock(&sw_dev_mtx);
	return (error);
}

#if defined(COMPAT_FREEBSD11)
#define XSWDEV_VERSION_11	1
struct xswdev11 {
	u_int	xsw_version;
	uint32_t xsw_dev;
	int	xsw_flags;
	int	xsw_nblks;
	int	xsw_used;
};
#endif

#if defined(__amd64__) && defined(COMPAT_FREEBSD32)
struct xswdev32 {
	u_int	xsw_version;
	u_int	xsw_dev1, xsw_dev2;
	int	xsw_flags;
	int	xsw_nblks;
	int	xsw_used;
};
#endif

static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	struct xswdev xs;
#if defined(__amd64__) && defined(COMPAT_FREEBSD32)
	struct xswdev32 xs32;
#endif
#if defined(COMPAT_FREEBSD11)
	struct xswdev11 xs11;
#endif
	int error;

	if (arg2 != 1)			/* name length */
		return (EINVAL);
	error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
	if (error != 0)
		return (error);
#if defined(__amd64__) && defined(COMPAT_FREEBSD32)
	if (req->oldlen == sizeof(xs32)) {
		xs32.xsw_version = XSWDEV_VERSION;
		xs32.xsw_dev1 = xs.xsw_dev;
		xs32.xsw_dev2 = xs.xsw_dev >> 32;
		xs32.xsw_flags = xs.xsw_flags;
		xs32.xsw_nblks = xs.xsw_nblks;
		xs32.xsw_used = xs.xsw_used;
		error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
		return (error);
	}
#endif
#if defined(COMPAT_FREEBSD11)
	if (req->oldlen == sizeof(xs11)) {
		xs11.xsw_version = XSWDEV_VERSION_11;
		xs11.xsw_dev = xs.xsw_dev; /* truncation */
		xs11.xsw_flags = xs.xsw_flags;
		xs11.xsw_nblks = xs.xsw_nblks;
		xs11.xsw_used = xs.xsw_used;
		error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
		return (error);
	}
#endif
	error = SYSCTL_OUT(req, &xs, sizeof(xs));
	return (error);
}

SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
    "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_vm_swap_info,
    "Swap statistics by device");

/*
 * Count the approximate swap usage in pages for a vmspace.  Swap blocks
 * that are shadowed or not yet copied on write are not counted.
 * The map must be locked.
 */
long
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map;
	vm_map_entry_t cur;
	vm_object_t object;
	struct swblk *sb;
	vm_pindex_t e, pi;
	long count;
	int i;

	map = &vmspace->vm_map;
	count = 0;

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
			continue;
		object = cur->object.vm_object;
		if (object == NULL || object->type != OBJT_SWAP)
			continue;
		VM_OBJECT_RLOCK(object);
		if (object->type != OBJT_SWAP)
			goto unlock;
		pi = OFF_TO_IDX(cur->offset);
		e = pi + OFF_TO_IDX(cur->end - cur->start);
		for (;; pi = sb->p + SWAP_META_PAGES) {
			sb = SWAP_PCTRIE_LOOKUP_GE(
			    &object->un_pager.swp.swp_blks, pi);
			if (sb == NULL || sb->p >= e)
				break;
			for (i = 0; i < SWAP_META_PAGES; i++) {
				if (sb->p + i < e &&
				    sb->d[i] != SWAPBLK_NONE)
					count++;
			}
		}
unlock:
		VM_OBJECT_RUNLOCK(object);
	}
	return (count);
}

/*
 * GEOM backend
 *
 * Swapping onto disk devices.
 *
 */

static g_orphan_t swapgeom_orphan;

static struct g_class g_swap_class = {
	.name = "SWAP",
	.version = G_VERSION,
	.orphan = swapgeom_orphan,
};

DECLARE_GEOM_CLASS(g_swap_class, g_class);


static void
swapgeom_close_ev(void *arg, int flags)
{
	struct g_consumer *cp;

	cp = arg;
	g_access(cp, -1, -1, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

/*
 * Add a reference to the g_consumer for an inflight transaction.
 */
static void
swapgeom_acquire(struct g_consumer *cp)
{

	mtx_assert(&sw_dev_mtx, MA_OWNED);
	cp->index++;
}

/*
 * Remove a reference from the g_consumer.  Post a close event if all
 * references go away, since the function might be called from the
 * biodone context.
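 *
 * The reference count lives in cp->index: it is set to one when the
 * consumer is created and swapgeom_acquire() adds one per in-flight
 * request, so the close event is only posted once the count drops to
 * zero.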
 */
static void
swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
{

	mtx_assert(&sw_dev_mtx, MA_OWNED);
	cp->index--;
	if (cp->index == 0) {
		if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
			sp->sw_id = NULL;
	}
}

static void
swapgeom_done(struct bio *bp2)
{
	struct swdevt *sp;
	struct buf *bp;
	struct g_consumer *cp;

	bp = bp2->bio_caller2;
	cp = bp2->bio_from;
	bp->b_ioflags = bp2->bio_flags;
	if (bp2->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	bp->b_resid = bp->b_bcount - bp2->bio_completed;
	bp->b_error = bp2->bio_error;
	bp->b_caller1 = NULL;
	bufdone(bp);
	sp = bp2->bio_caller1;
	mtx_lock(&sw_dev_mtx);
	swapgeom_release(cp, sp);
	mtx_unlock(&sw_dev_mtx);
	g_destroy_bio(bp2);
}

static void
swapgeom_strategy(struct buf *bp, struct swdevt *sp)
{
	struct bio *bio;
	struct g_consumer *cp;

	mtx_lock(&sw_dev_mtx);
	cp = sp->sw_id;
	if (cp == NULL) {
		mtx_unlock(&sw_dev_mtx);
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
	swapgeom_acquire(cp);
	mtx_unlock(&sw_dev_mtx);
	if (bp->b_iocmd == BIO_WRITE)
		bio = g_new_bio();
	else
		bio = g_alloc_bio();
	if (bio == NULL) {
		mtx_lock(&sw_dev_mtx);
		swapgeom_release(cp, sp);
		mtx_unlock(&sw_dev_mtx);
		bp->b_error = ENOMEM;
		bp->b_ioflags |= BIO_ERROR;
		printf("swap_pager: cannot allocate bio\n");
		bufdone(bp);
		return;
	}

	bp->b_caller1 = bio;
	bio->bio_caller1 = sp;
	bio->bio_caller2 = bp;
	bio->bio_cmd = bp->b_iocmd;
	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
	bio->bio_length = bp->b_bcount;
	bio->bio_done = swapgeom_done;
	if (!buf_mapped(bp)) {
		bio->bio_ma = bp->b_pages;
		bio->bio_data = unmapped_buf;
		bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
		bio->bio_ma_n = bp->b_npages;
		bio->bio_flags |= BIO_UNMAPPED;
	} else {
		bio->bio_data = bp->b_data;
		bio->bio_ma = NULL;
	}
	g_io_request(bio, cp);
	return;
}

static void
swapgeom_orphan(struct g_consumer *cp)
{
	struct swdevt *sp;
	int destroy;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_id == cp) {
			sp->sw_flags |= SW_CLOSING;
			break;
		}
	}
	/*
	 * Drop reference we were created with.  Do directly since we're in a
	 * special context where we don't have to queue the call to
	 * swapgeom_close_ev().
	 */
	cp->index--;
	destroy = ((sp != NULL) && (cp->index == 0));
	if (destroy)
		sp->sw_id = NULL;
	mtx_unlock(&sw_dev_mtx);
	if (destroy)
		swapgeom_close_ev(cp, 0);
}

static void
swapgeom_close(struct thread *td, struct swdevt *sw)
{
	struct g_consumer *cp;

	mtx_lock(&sw_dev_mtx);
	cp = sw->sw_id;
	sw->sw_id = NULL;
	mtx_unlock(&sw_dev_mtx);

	/*
	 * swapgeom_close() may be called from the biodone context,
	 * where we cannot perform topology changes.  Delegate the
	 * work to the events thread.
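	 * g_waitfor_event() does not return until swapgeom_close_ev() has
	 * run, so the consumer is fully detached and destroyed before the
	 * device close completes.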
	 */
	if (cp != NULL)
		g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
}

static int
swapongeom_locked(struct cdev *dev, struct vnode *vp)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	static struct g_geom *gp;
	struct swdevt *sp;
	u_long nblks;
	int error;

	pp = g_dev_getprovider(dev);
	if (pp == NULL)
		return (ENODEV);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		cp = sp->sw_id;
		if (cp != NULL && cp->provider == pp) {
			mtx_unlock(&sw_dev_mtx);
			return (EBUSY);
		}
	}
	mtx_unlock(&sw_dev_mtx);
	if (gp == NULL)
		gp = g_new_geomf(&g_swap_class, "swap");
	cp = g_new_consumer(gp);
	cp->index = 1;	/* Number of active I/Os, plus one for being active. */
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	g_attach(cp, pp);
	/*
	 * XXX: Every time you think you can improve the margin for
	 * footshooting, somebody depends on the ability to do so:
	 * savecore(8) wants to write to our swapdev so we cannot
	 * set an exclusive count :-(
	 */
	error = g_access(cp, 1, 1, 0);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		return (error);
	}
	nblks = pp->mediasize / DEV_BSIZE;
	swaponsomething(vp, cp, nblks, swapgeom_strategy,
	    swapgeom_close, dev2udev(dev),
	    (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
	return (0);
}

static int
swapongeom(struct vnode *vp)
{
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
		error = ENOENT;
	} else {
		g_topology_lock();
		error = swapongeom_locked(vp->v_rdev, vp);
		g_topology_unlock();
	}
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * VNODE backend
 *
 * This is used mainly for network filesystem (read: probably only tested
 * with NFS) swapfiles.
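 *
 * Vnodes arrive here through the VREG/VFCF_NETWORK branch of
 * sys_swapon() above, which calls swaponvp() instead of swapongeom().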
 *
 */

static void
swapdev_strategy(struct buf *bp, struct swdevt *sp)
{
	struct vnode *vp2;

	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);

	vp2 = sp->sw_id;
	vhold(vp2);
	if (bp->b_iocmd == BIO_WRITE) {
		if (bp->b_bufobj)
			bufobj_wdrop(bp->b_bufobj);
		bufobj_wref(&vp2->v_bufobj);
	}
	if (bp->b_bufobj != &vp2->v_bufobj)
		bp->b_bufobj = &vp2->v_bufobj;
	bp->b_vp = vp2;
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);
	return;
}

static void
swapdev_close(struct thread *td, struct swdevt *sp)
{

	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
	vrele(sp->sw_vp);
}


static int
swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
{
	struct swdevt *sp;
	int error;

	if (nblks == 0)
		return (ENXIO);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_id == vp) {
			mtx_unlock(&sw_dev_mtx);
			return (EBUSY);
		}
	}
	mtx_unlock(&sw_dev_mtx);

	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
	error = mac_system_check_swapon(td->td_ucred, vp);
	if (error == 0)
#endif
		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
	(void) VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
	    NODEV, 0);
	return (0);
}

static int
sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
{
	int error, new, n;

	new = nsw_wcount_async_max;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (new > nswbuf / 2 || new < 1)
		return (EINVAL);

	mtx_lock(&swbuf_mtx);
	while (nsw_wcount_async_max != new) {
		/*
		 * Adjust difference.  If the current async count is too low,
		 * we will need to squeeze our update slowly in.  Sleep with a
		 * higher priority than getpbuf() to finish faster.
		 */
		n = new - nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		} else {
			nsw_wcount_async_max -= nsw_wcount_async;
			nsw_wcount_async = 0;
			msleep(&nsw_wcount_async, &swbuf_mtx, PSWP,
			    "swpsysctl", 0);
		}
	}
	mtx_unlock(&swbuf_mtx);

	return (0);
}
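
/*
 * Worked example for the adjustment loop above (hypothetical values,
 * noted here for illustration): lowering the limit from 16 to 8 while
 * only 3 async pbufs are currently free first absorbs those 3 (limit
 * 13, free count 0) and then sleeps; as outstanding asynchronous
 * writes complete and return their buffer slots, the loop repeats
 * until the limit reaches 8.
 */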