/*-
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * New Swap System
 * Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 *	from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_swap.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * SWB_NPAGES must be a power of 2.  It may be set to 1, 2, 4, 8, or 16
 * pages per allocation.  We recommend you stick with the default of 8.
 * The 16-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES MAX_PAGEOUT_CLUSTER
#endif

/*
 * Piecemeal swap metadata structure.  Swap is stored in a radix tree.
 *
 * If SWB_NPAGES is 8 and sizeof(char *) == sizeof(daddr_t), our radix
 * is basically 8.  Assuming PAGE_SIZE == 4096, one tree level represents
 * 32K worth of data, two levels represent 256K, three levels represent
 * 2 MBytes.  This is acceptable.
 *
 * Overall memory utilization is about the same as the old swap structure.
 */
#define SWCORRECT(n)		(sizeof(void *) * (n) / sizeof(daddr_t))
#define SWAP_META_PAGES		(SWB_NPAGES * 2)
#define SWAP_META_MASK		(SWAP_META_PAGES - 1)

struct swblock {
	struct swblock	*swb_hnext;
	vm_object_t	swb_object;
	vm_pindex_t	swb_index;
	int		swb_count;
	daddr_t		swb_pages[SWAP_META_PAGES];
};

static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static int swdev_syscall_active = 0; /* serialize swap(on|off) */

static vm_ooffset_t swap_total;
SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0,
    "Total amount of available swap storage.");
static vm_ooffset_t swap_reserved;
SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0,
    "Amount of swap storage needed to back all allocated anonymous memory.");
static int overcommit = 0;
SYSCTL_INT(_vm, OID_AUTO, overcommit, CTLFLAG_RW, &overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");

/* bits from overcommit */
#define	SWAP_RESERVE_FORCE_ON		(1 << 0)
#define	SWAP_RESERVE_RLIMIT_ON		(1 << 1)
#define	SWAP_RESERVE_ALLOW_NONWIRED	(1 << 2)
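
/*
 * Illustrative accounting sketch (a reading of the code below, not
 * authoritative): with the default overcommit == 0, swap_reserve_by_cred()
 * grants every request and merely tracks it in swap_reserved.  With
 * SWAP_RESERVE_FORCE_ON set, a reservation fails once swap_reserved + incr
 * would exceed swap_total (plus, if SWAP_RESERVE_ALLOW_NONWIRED is also
 * set, the non-wired, non-reserved portion of RAM: (v_page_count -
 * v_free_reserved - v_wire_count) * PAGE_SIZE), unless the thread holds
 * PRIV_VM_SWAP_NOQUOTA.  SWAP_RESERVE_RLIMIT_ON additionally enforces the
 * per-uid RLIMIT_SWAP.
 */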

/*
 * Reserve swap for the current thread's credentials; see
 * swap_reserve_by_cred() for the policy.
 */
int
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

int
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	vm_ooffset_t r, s;
	int res, error;
	static int curfail;
	static struct timeval lastfail;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	if (incr & PAGE_MASK)
		panic("swap_reserve: & PAGE_MASK");

	res = 0;
	mtx_lock(&sw_dev_mtx);
	r = swap_reserved + incr;
	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
		s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count;
		s *= PAGE_SIZE;
	} else
		s = 0;
	s += swap_total;
	if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
	    (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
		res = 1;
		swap_reserved = r;
	}
	mtx_unlock(&sw_dev_mtx);

	if (res) {
		PROC_LOCK(curproc);
		UIDINFO_VMSIZE_LOCK(uip);
		if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
		    uip->ui_vmsize + incr > lim_cur(curproc, RLIMIT_SWAP) &&
		    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT))
			res = 0;
		else
			uip->ui_vmsize += incr;
		UIDINFO_VMSIZE_UNLOCK(uip);
		PROC_UNLOCK(curproc);
		if (!res) {
			mtx_lock(&sw_dev_mtx);
			swap_reserved -= incr;
			mtx_unlock(&sw_dev_mtx);
		}
	}
	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
		    uip->ui_uid, curproc->p_pid, (intmax_t)incr);
	}

	return (res);
}

/*
 * Unconditionally charge a reservation, bypassing the overcommit
 * policy checks.
 */
void
swap_reserve_force(vm_ooffset_t incr)
{
	struct uidinfo *uip;

	mtx_lock(&sw_dev_mtx);
	swap_reserved += incr;
	mtx_unlock(&sw_dev_mtx);

	uip = curthread->td_ucred->cr_ruidinfo;
	PROC_LOCK(curproc);
	UIDINFO_VMSIZE_LOCK(uip);
	uip->ui_vmsize += incr;
	UIDINFO_VMSIZE_UNLOCK(uip);
	PROC_UNLOCK(curproc);
}

/*
 * Release a swap reservation charged to the current thread's credentials.
 */
void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curthread->td_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	if (decr & PAGE_MASK)
		panic("swap_release: & PAGE_MASK");

	mtx_lock(&sw_dev_mtx);
	if (swap_reserved < decr)
		panic("swap_reserved < decr");
	swap_reserved -= decr;
	mtx_unlock(&sw_dev_mtx);

	UIDINFO_VMSIZE_LOCK(uip);
	if (uip->ui_vmsize < decr)
		printf("negative vmsize for uid = %d\n", uip->ui_uid);
	uip->ui_vmsize -= decr;
	UIDINFO_VMSIZE_UNLOCK(uip);
}

static void swapdev_strategy(struct buf *, struct swdevt *sw);

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
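
/*
 * Global hash of swblock structures; a sketch of the layout, inferred
 * from swp_pager_hash() below: each chain entry records up to
 * SWAP_META_PAGES swap block assignments for one (object, aligned page
 * index) pair, and the bucket is chosen by XOR-folding the object
 * pointer with the aligned index.
 */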
static struct swblock **swhash;
static int swhash_mask;
static struct mtx swhash_mtx;

static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static struct sx sw_alloc_sx;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
    CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t	swap_zone;
static struct vm_object	swap_zone_obj;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
	swap_pager_alloc(void *handle, vm_ooffset_t size,
	    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t
	swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager */
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	.pgo_getpages =	swap_pager_getpages,	/* pagein */
	.pgo_putpages =	swap_pager_putpages,	/* pageout */
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int dmmax;
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
    CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static int	swapongeom(struct thread *, struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, int npages);
static daddr_t	swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * Helper to free a page unless it has been wired; used to release
 * pages other than the specifically requested page.
 */
static void
swp_pager_free_nrpage(vm_page_t m)
{

	vm_page_lock(m);
	if (m->wire_count == 0)
		vm_page_free(m);
	vm_page_unlock(m);
}

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is a helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */
static struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~(vm_pindex_t)SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return (pswap);
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.  This reservation
	 * is typically limited to around 32MB by default.
	 */
	n = cnt.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;
	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
	if (swap_zone == NULL)
		panic("failed to create swap_zone.");
	do {
		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;
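
	/*
	 * Back-of-the-envelope sizing (illustrative, assuming a 64-bit
	 * daddr_t and the default SWAP_META_PAGES of 16): each swblock is
	 * roughly 160 bytes, so a machine with 1 GB of RAM (262144 4K
	 * pages) starts with n = 131072 entries, or about 20 MB of
	 * metadata reservation before any fallback reduction above.
	 */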

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
	mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 *
 *	MPSAFE
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset + PAGE_MASK + size);
	if (handle) {
		mtx_lock(&Giant);
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			if (cred != NULL) {
				if (!swap_reserve_by_cred(size, cred)) {
					sx_xunlock(&sw_alloc_sx);
					mtx_unlock(&Giant);
					return (NULL);
				}
				crhold(cred);
			}
			object = vm_object_allocate(OBJT_DEFAULT, pindex);
			VM_OBJECT_LOCK(object);
			object->handle = handle;
			if (cred != NULL) {
				object->cred = cred;
				object->charge = size;
			}
			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
			VM_OBJECT_UNLOCK(object);
		}
		sx_xunlock(&sw_alloc_sx);
		mtx_unlock(&Giant);
	} else {
		if (cred != NULL) {
			if (!swap_reserve_by_cred(size, cred))
				return (NULL);
			crhold(cred);
		}
		object = vm_object_allocate(OBJT_DEFAULT, pindex);
		VM_OBJECT_LOCK(object);
		if (cred != NULL) {
			object->cred = cred;
			object->charge = size;
		}
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		VM_OBJECT_UNLOCK(object);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		mtx_lock(&sw_alloc_mtx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
		mtx_unlock(&sw_alloc_mtx);
	}

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int i;

	blk = SWAPBLK_NONE;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	for (i = 0; i < nswapdev; i++) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if (!(sp->sw_flags & SW_CLOSING)) {
			blk = blist_alloc(sp->sw_blist, npages);
			if (blk != SWAPBLK_NONE) {
				blk += sp->sw_first;
				sp->sw_used += npages;
				swap_pager_avail -= npages;
				swp_sizecheck();
				swdevhd = TAILQ_NEXT(sp, sw_list);
				goto done;
			}
		}
		sp = TAILQ_NEXT(sp, sw_list);
	}
	if (swap_pager_full != 2) {
		printf("swap_pager_getswapspace(%d): failed\n", npages);
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	swdevhd = NULL;
done:
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

/*
 * Return non-zero if swap block blk resides on the given swap device.
 */
static int
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

/*
 * Dispatch a buf to the swap device that owns its block number.
 */
static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
			mtx_unlock(&sw_dev_mtx);
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (blk >= sp->sw_first && blk < sp->sw_end) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	VM_OBJECT_LOCK(object);
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					VM_OBJECT_UNLOCK(object);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	VM_OBJECT_UNLOCK(object);
	return (0);
}
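
/*
 * Illustrative call pattern for swap_pager_reserve() above (hypothetical,
 * not taken from this file): a consumer that wants its entire backing
 * store preassigned, such as a swap-backed memory disk, might do
 *
 *	if (swap_pager_reserve(object, 0, OFF_TO_IDX(size)) < 0)
 *		return (ENOMEM);
 *
 * after which every page index in [0, OFF_TO_IDX(size)) has a swap
 * block assigned in the object's metadata.
 */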

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		if (srcobject->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE) {
				/*
				 * swp_pager_meta_build() can sleep.
				 */
				vm_object_pip_add(srcobject, 1);
				VM_OBJECT_UNLOCK(srcobject);
				vm_object_pip_add(dstobject, 1);
				swp_pager_meta_build(dstobject, i, srcaddr);
				vm_object_pip_wakeup(dstobject);
				VM_OBJECT_LOCK(srcobject);
				vm_object_pip_wakeup(srcobject);
			}
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not
	 * accidentally double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
}
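
/*
 * Worked example for swap_pager_haspage() below (illustrative numbers):
 * if pindex is backed by swap block 100 and blocks 98..103 were assigned
 * contiguously to pindex-2..pindex+3, the scans below report *before = 2
 * and *after = 3, capped either way at SWB_NPAGES/2 - 1.
 */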

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
{
	daddr_t blk0;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * do we have good backing store at the requested index ?
	 */
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */
static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;

	mreq = m[reqpage];

	KASSERT(mreq->object == object,
	    ("swap_pager_getpages: object mismatch %p/%p",
	    object, mreq->object));
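
	/*
	 * Clustering sketch (illustrative): if m[reqpage] is backed by swap
	 * block B, a neighboring page m[k] joins the I/O only when its swap
	 * block equals B - (reqpage - k), i.e. when the backing store is
	 * physically contiguous with the requested page's block.  The two
	 * scans below shrink [i, j) to exactly that contiguous run.
	 */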

	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range but we know it to
	 * not span devices.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made with the object locked.
	 */
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */
	if (0 < i || j < count) {
		int k;

		for (k = 0; k < i; ++k)
			swp_pager_free_nrpage(m[k]);
		for (k = j; k < count; ++k)
			swp_pager_free_nrpage(m[k]);
	}

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */
	if (blk == SWAPBLK_NONE)
		return (VM_PAGER_FAIL);

	/*
	 * Getpbuf() can sleep.
	 */
	VM_OBJECT_UNLOCK(object);
	/*
	 * Get a swap buffer header to perform the IO
	 */
	bp = getpbuf(&nsw_rcount);
	bp->b_flags |= B_PAGING;

	/*
	 * map our page(s) into kva for input
	 */
	pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i);

	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	VM_OBJECT_LOCK(object);
	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			m[k]->oflags |= VPO_SWAPINPROG;
		}
	}
	bp->b_npages = j - i;

	PCPU_INC(cnt.v_swapin);
	PCPU_ADD(cnt.v_swappgsin, bp->b_npages);

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_npages);
	VM_OBJECT_UNLOCK(object);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
	 */
	BUF_KERNPROC(bp);
	swp_pager_strategy(bp);

	/*
	 * wait for the page we want to complete.  VPO_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */
	VM_OBJECT_LOCK(object);
	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
		mreq->oflags |= VPO_WANTED;
		PCPU_INC(cnt.v_intrans);
		if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz * 20)) {
			printf(
			    "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
		}
	}

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return (VM_PAGER_ERROR);
	} else {
		return (VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	VM_OBJECT_UNLOCK(object);

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	mtx_lock(&pbuf_mtx);
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
	}
	mtx_unlock(&pbuf_mtx);
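
	/*
	 * Example of the adjustment above (illustrative numbers): if the
	 * sysop raises vm.swap_async_max from 4 to 8 while 2 async pbufs
	 * are in flight, n becomes +4, nsw_wcount_async goes from 2 free
	 * slots to 6, nsw_wcount_async_max from 4 to 8, and waiters on
	 * nsw_wcount_async are woken to consume the new slots.
	 */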

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			continue;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_flags |= B_PAGING;
		bp->b_iocmd = BIO_WRITE;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = crhold(thread0.td_ucred);
		bp->b_wcred = crhold(thread0.td_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		VM_OBJECT_LOCK(object);
		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			mreq->oflags |= VPO_SWAPINPROG;
			bp->b_pages[j] = mreq;
		}
		VM_OBJECT_UNLOCK(object);
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		PCPU_INC(cnt.v_swapout);
		PCPU_ADD(cnt.v_swappgsout, bp->b_npages);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swp_pager_strategy(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			/* restart outer loop */
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
		 */
		bp->b_iodone = bdone;
		swp_pager_strategy(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bwait(bp, PVM, "swwrt");
		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;
		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
	}
	VM_OBJECT_LOCK(object);
}
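
/*
 * Note on the VM_PAGER_PEND convention used above (an informal reading of
 * the code, not an authoritative statement): once a page's rtvals[] slot
 * is VM_PAGER_PEND, its pip reference and soft-busy are cleaned up by
 * swp_pager_async_iodone() below rather than by the caller, which is why
 * the synchronous path also marks pages PEND before invoking the
 * completion routine by hand.
 */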
"pagein" : "pageout"), 1487 (long)bp->b_blkno, 1488 (long)bp->b_bcount, 1489 bp->b_error 1490 ); 1491 } 1492 1493 /* 1494 * remove the mapping for kernel virtual 1495 */ 1496 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1497 1498 if (bp->b_npages) { 1499 object = bp->b_pages[0]->object; 1500 VM_OBJECT_LOCK(object); 1501 } 1502 1503 /* 1504 * cleanup pages. If an error occurs writing to swap, we are in 1505 * very serious trouble. If it happens to be a disk error, though, 1506 * we may be able to recover by reassigning the swap later on. So 1507 * in this case we remove the m->swapblk assignment for the page 1508 * but do not free it in the rlist. The errornous block(s) are thus 1509 * never reallocated as swap. Redirty the page and continue. 1510 */ 1511 for (i = 0; i < bp->b_npages; ++i) { 1512 vm_page_t m = bp->b_pages[i]; 1513 1514 m->oflags &= ~VPO_SWAPINPROG; 1515 1516 if (bp->b_ioflags & BIO_ERROR) { 1517 /* 1518 * If an error occurs I'd love to throw the swapblk 1519 * away without freeing it back to swapspace, so it 1520 * can never be used again. But I can't from an 1521 * interrupt. 1522 */ 1523 if (bp->b_iocmd == BIO_READ) { 1524 /* 1525 * When reading, reqpage needs to stay 1526 * locked for the parent, but all other 1527 * pages can be freed. We still want to 1528 * wakeup the parent waiting on the page, 1529 * though. ( also: pg_reqpage can be -1 and 1530 * not match anything ). 1531 * 1532 * We have to wake specifically requested pages 1533 * up too because we cleared VPO_SWAPINPROG and 1534 * someone may be waiting for that. 1535 * 1536 * NOTE: for reads, m->dirty will probably 1537 * be overridden by the original caller of 1538 * getpages so don't play cute tricks here. 1539 */ 1540 m->valid = 0; 1541 if (i != bp->b_pager.pg_reqpage) 1542 swp_pager_free_nrpage(m); 1543 else 1544 vm_page_flash(m); 1545 /* 1546 * If i == bp->b_pager.pg_reqpage, do not wake 1547 * the page up. The caller needs to. 1548 */ 1549 } else { 1550 /* 1551 * If a write error occurs, reactivate page 1552 * so it doesn't clog the inactive list, 1553 * then finish the I/O. 1554 */ 1555 vm_page_dirty(m); 1556 vm_page_lock(m); 1557 vm_page_activate(m); 1558 vm_page_unlock(m); 1559 vm_page_io_finish(m); 1560 } 1561 } else if (bp->b_iocmd == BIO_READ) { 1562 /* 1563 * NOTE: for reads, m->dirty will probably be 1564 * overridden by the original caller of getpages so 1565 * we cannot set them in order to free the underlying 1566 * swap in a low-swap situation. I don't think we'd 1567 * want to do that anyway, but it was an optimization 1568 * that existed in the old swapper for a time before 1569 * it got ripped out due to precisely this problem. 1570 * 1571 * If not the requested page then deactivate it. 1572 * 1573 * Note that the requested page, reqpage, is left 1574 * busied, but we still have to wake it up. The 1575 * other pages are released (unbusied) by 1576 * vm_page_wakeup(). 1577 */ 1578 KASSERT(!pmap_page_is_mapped(m), 1579 ("swp_pager_async_iodone: page %p is mapped", m)); 1580 m->valid = VM_PAGE_BITS_ALL; 1581 KASSERT(m->dirty == 0, 1582 ("swp_pager_async_iodone: page %p is dirty", m)); 1583 1584 /* 1585 * We have to wake specifically requested pages 1586 * up too because we cleared VPO_SWAPINPROG and 1587 * could be waiting for it in getpages. However, 1588 * be sure to not unbusy getpages specifically 1589 * requested page - getpages expects it to be 1590 * left busy. 
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_lock(m);
				vm_page_deactivate(m);
				vm_page_unlock(m);
				vm_page_wakeup(m);
			} else
				vm_page_flash(m);
		} else {
			/*
			 * For write success, clear the dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			KASSERT((m->flags & PG_WRITEABLE) == 0,
			    ("swp_pager_async_iodone: page %p is not write"
			    " protected", m));
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (vm_page_count_severe()) {
				vm_page_lock(m);
				vm_page_try_to_cache(m);
				vm_page_unlock(m);
			}
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object != NULL) {
		vm_object_pip_wakeupn(object, bp->b_npages);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
	 * bstrategy().  Set them back to NULL now we're done with it, or we'll
	 * trigger a KASSERT in relpbuf().
	 */
	if (bp->b_vp) {
		bp->b_vp = NULL;
		bp->b_bufobj = NULL;
	}
	/*
	 * release the physical I/O buffer
	 */
	relpbuf(
	    bp,
	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
}

/*
 *	swap_pager_isswapped:
 *
 *	Return 1 if at least one page in the given object is paged
 *	out to the given swap device.
 *
 *	This routine may not block.
 */
int
swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
{
	daddr_t index = 0;
	int bcount;
	int i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_SWAP)
		return (0);

	mtx_lock(&swhash_mtx);
	for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
		struct swblock *swap;

		if ((swap = *swp_pager_hash(object, index)) != NULL) {
			for (i = 0; i < SWAP_META_PAGES; ++i) {
				if (swp_pager_isondev(swap->swb_pages[i], sp)) {
					mtx_unlock(&swhash_mtx);
					return (1);
				}
			}
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swap_pager_isswapped: failed to locate all swap meta blocks");
	}
	mtx_unlock(&swhash_mtx);
	return (0);
}

/*
 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
 *
 *	This routine dissociates the page at the given index within a
 *	swap block from its backing store, paging it in if necessary.
 *	If the page is paged in, it is placed in the inactive queue,
 *	since it had its backing store ripped out from under it.
 *	We also attempt to swap in all other pages in the swap block,
 *	we only guarantee that the one at the specified index is
 *	paged in.
 *
 *	XXX - The code to page the whole block in doesn't work, so we
 *	      revert to the one-by-one behavior for now.  Sigh.
 */
static inline void
swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	vm_object_pip_add(object, 1);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
	if (m->valid == VM_PAGE_BITS_ALL) {
		vm_object_pip_subtract(object, 1);
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_activate(m);
		vm_page_unlock(m);
		vm_page_wakeup(m);
		vm_pager_page_unswapped(m);
		return;
	}

	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
	vm_object_pip_subtract(object, 1);
	vm_page_dirty(m);
	vm_page_lock(m);
	vm_page_deactivate(m);
	vm_page_unlock(m);
	vm_page_wakeup(m);
	vm_pager_page_unswapped(m);
}

/*
 *	swap_pager_swapoff:
 *
 *	Page in all of the pages that have been paged out to the
 *	given device.  The corresponding blocks in the bitmap must be
 *	marked as allocated and the device must be flagged SW_CLOSING.
 *	There may be no processes swapped out to the device.
 *
 *	This routine may block.
 */
static void
swap_pager_swapoff(struct swdevt *sp)
{
	struct swblock *swap;
	int i, j, retries;

	GIANT_REQUIRED;

	retries = 0;
full_rescan:
	mtx_lock(&swhash_mtx);
	for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
restart:
		for (swap = swhash[i]; swap != NULL; swap = swap->swb_hnext) {
			vm_object_t object = swap->swb_object;
			vm_pindex_t pindex = swap->swb_index;
			for (j = 0; j < SWAP_META_PAGES; ++j) {
				if (swp_pager_isondev(swap->swb_pages[j], sp)) {
					/* avoid deadlock */
					if (!VM_OBJECT_TRYLOCK(object)) {
						break;
					} else {
						mtx_unlock(&swhash_mtx);
						swp_pager_force_pagein(object,
						    pindex + j);
						VM_OBJECT_UNLOCK(object);
						mtx_lock(&swhash_mtx);
						goto restart;
					}
				}
			}
		}
	}
	mtx_unlock(&swhash_mtx);
	if (sp->sw_used) {
		/*
		 * Objects may be locked or paging to the device being
		 * removed, so we will miss their pages and need to
		 * make another pass.  We have marked this device as
		 * SW_CLOSING, so the activity should finish soon.
		 */
		retries++;
		if (retries > 100) {
			panic("swapoff: failed to locate %d swap blocks",
			    sp->sw_used);
		}
		pause("swpoff", hz / 20);
		goto full_rescan;
	}
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */
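
/*
 * Layout example (illustrative): with the default SWAP_META_PAGES of 16,
 * page index 37 of an object lives in the swblock whose swb_index is 32
 * (37 & ~SWAP_META_MASK) at slot 5 (37 & SWAP_META_MASK); swb_count
 * tracks how many of the 16 slots hold a block other than SWAPBLK_NONE.
 */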

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
{
	struct swblock *swap;
	struct swblock **pswap;
	int idx;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * Convert default object to swap object if necessary
	 */
	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
retry:
	mtx_lock(&swhash_mtx);
	pswap = swp_pager_hash(object, pindex);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			goto done;

		swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
		if (swap == NULL) {
			mtx_unlock(&swhash_mtx);
			VM_OBJECT_UNLOCK(object);
			if (uma_zone_exhausted(swap_zone)) {
				printf("swap zone exhausted, increase kern.maxswzone\n");
				vm_pageout_oom(VM_OOM_SWAPZ);
				pause("swzonex", 10);
			} else
				VM_WAIT;
			VM_OBJECT_LOCK(object);
			goto retry;
		}

		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */
	idx = pindex & SWAP_META_MASK;

	if (swap->swb_pages[idx] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[idx], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[idx] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
done:
	mtx_unlock(&swhash_mtx);
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		mtx_lock(&swhash_mtx);
		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					uma_zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
		mtx_unlock(&swhash_mtx);
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		mtx_lock(&swhash_mtx);
		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			uma_zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		mtx_unlock(&swhash_mtx);
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}
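/*
 * Illustrative sketch (not part of this file's logic): the skip-ahead
 * arithmetic swp_pager_meta_free() uses when a hash bucket is missing.
 * When no swblock covers 'index', every slot up to the next swblock
 * boundary is known to be empty, so the loop advances to that boundary
 * in one step instead of probing page by page.  Values assume
 * SWAP_META_PAGES == 16; the function name is hypothetical.
 */
#if 0
static void
swp_meta_skip_sketch(void)
{
	vm_pindex_t index = 37;	/* mid-swblock starting point */
	daddr_t count = 100;	/* pages left to free */
	int n;

	/* Slots remaining in the (absent) swblock covering index 37. */
	n = SWAP_META_PAGES - (index & SWAP_META_MASK);	/* 16 - 5 = 11 */

	count -= n;	/* 89 pages still to examine */
	index += n;	/* next probe at index 48, a swblock boundary */
}
#endif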
/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up or popped,
 *	or SWAPBLK_NONE if the block was freed or was invalid.  This routine
 *	will automatically free any invalid meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free; pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
{
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;
	int idx;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP)
		return (SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	mtx_lock(&swhash_mtx);
	pswap = swp_pager_hash(object, pindex);

	if ((swap = *pswap) != NULL) {
		idx = pindex & SWAP_META_MASK;
		r1 = swap->swb_pages[idx];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[idx] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					uma_zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	mtx_unlock(&swhash_mtx);
	return (r1);
}

/*
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapon_args {
	char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
swapon(struct thread *td, struct swapon_args *uap)
{
	struct vattr attr;
	struct vnode *vp;
	struct nameidata nd;
	int error;

	error = priv_check(td, PRIV_SWAPON);
	if (error)
		return (error);

	mtx_lock(&Giant);
	while (swdev_syscall_active)
		tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
	swdev_syscall_active = 1;

	/*
	 * Swap metadata may not fit in the KVM if we have physical
	 * memory of >1GB.
	 */
	if (swap_zone == NULL) {
		error = ENOMEM;
		goto done;
	}

	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
	    uap->name, td);
	error = namei(&nd);
	if (error)
		goto done;

	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	if (vn_isdisk(vp, &error)) {
		error = swapongeom(td, vp);
	} else if (vp->v_type == VREG &&
	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}

	if (error)
		vrele(vp);
done:
	swdev_syscall_active = 0;
	wakeup_one(&swdev_syscall_active);
	mtx_unlock(&Giant);
	return (error);
}
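/*
 * Illustrative sketch (not part of this file's logic): the sleep/wakeup
 * serialization idiom swapon() above and swapoff() below both use.  A
 * single flag doubles as the "busy" marker and the sleep channel;
 * contenders sleep on its address, and exactly one waiter is admitted
 * when the holder clears the flag.  The names here are hypothetical.
 */
#if 0
static int example_busy;

static void
example_serialized_op(void)
{
	mtx_lock(&Giant);
	/* Wait for any in-flight operation to finish. */
	while (example_busy)
		tsleep(&example_busy, PUSER - 1, "exbusy", 0);
	example_busy = 1;	/* we now own the critical section */

	/* ... the serialized work would go here ... */

	example_busy = 0;
	wakeup_one(&example_busy);	/* admit exactly one waiter */
	mtx_unlock(&Giant);
}
#endif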
static void
swaponsomething(struct vnode *vp, void *id, u_long nblks,
    sw_strategy_t *strategy, sw_close_t *close, dev_t dev)
{
	struct swdevt *sp, *tsp;
	swblk_t dvbase;
	u_long mblocks;

	/*
	 * If we go beyond this, we get overflows in the radix
	 * tree bitmap code.
	 */
	mblocks = 0x40000000 / BLIST_META_RADIX;
	if (nblks > mblocks) {
		printf("WARNING: reducing size to maximum of %lu blocks per swap unit\n",
		    mblocks);
		nblks = mblocks;
	}
	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
	nblks &= ~(ctodb(1) - 1);
	nblks = dbtoc(nblks);

	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
	sp->sw_vp = vp;
	sp->sw_id = id;
	sp->sw_dev = dev;
	sp->sw_flags = 0;
	sp->sw_nblks = nblks;
	sp->sw_used = 0;
	sp->sw_strategy = strategy;
	sp->sw_close = close;

	sp->sw_blist = blist_create(nblks, M_WAITOK);
	/*
	 * Do not free the first two blocks in order to avoid overwriting
	 * any BSD label at the front of the partition
	 */
	blist_free(sp->sw_blist, 2, nblks - 2);

	dvbase = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
		if (tsp->sw_end >= dvbase) {
			/*
			 * We put one uncovered page between the devices
			 * in order to definitively prevent any cross-device
			 * I/O requests
			 */
			dvbase = tsp->sw_end + 1;
		}
	}
	sp->sw_first = dvbase;
	sp->sw_end = dvbase + nblks;
	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
	nswapdev++;
	swap_pager_avail += nblks;
	swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
	swp_sizecheck();
	mtx_unlock(&sw_dev_mtx);
}

/*
 * SYSCALL: swapoff(devname)
 *
 * Disable swapping on the given device.
 *
 * XXX: Badly designed system call: it should use a device index
 * rather than filename as specification.  We keep sw_vp around
 * only to make this work.
 */
#ifndef _SYS_SYSPROTO_H_
struct swapoff_args {
	char *name;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
swapoff(struct thread *td, struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nameidata nd;
	struct swdevt *sp;
	int error;

	error = priv_check(td, PRIV_SWAPOFF);
	if (error)
		return (error);

	mtx_lock(&Giant);
	while (swdev_syscall_active)
		tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
	swdev_syscall_active = 1;

	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
	    td);
	error = namei(&nd);
	if (error)
		goto done;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_vp == vp)
			break;
	}
	mtx_unlock(&sw_dev_mtx);
	if (sp == NULL) {
		error = EINVAL;
		goto done;
	}
	error = swapoff_one(sp, td->td_ucred);
done:
	swdev_syscall_active = 0;
	wakeup_one(&swdev_syscall_active);
	mtx_unlock(&Giant);
	return (error);
}
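/*
 * Illustrative sketch (not part of this file's logic): how
 * swaponsomething() above lays successive devices out in the unified
 * swap block space.  Each device occupies [sw_first, sw_end), and a
 * one-page hole is left after the previous device so no I/O request can
 * ever straddle two devices.  The sizes are made up for the example.
 */
#if 0
static void
swap_layout_sketch(void)
{
	swblk_t dvbase, a_first, a_end, b_first, b_end;
	u_long nblks_a = 1000, nblks_b = 500;

	dvbase = 0;
	a_first = dvbase;		/* first device: pages [0, 1000) */
	a_end = dvbase + nblks_a;

	dvbase = a_end + 1;		/* leave page 1000 uncovered */
	b_first = dvbase;		/* second device: pages [1001, 1501) */
	b_end = dvbase + nblks_b;
}
#endif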
static int
swapoff_one(struct swdevt *sp, struct ucred *cred)
{
	u_long nblks, dvbase;
#ifdef MAC
	int error;
#endif

	mtx_assert(&Giant, MA_OWNED);
#ifdef MAC
	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_system_check_swapoff(cred, sp->sw_vp);
	(void) VOP_UNLOCK(sp->sw_vp, 0);
	if (error != 0)
		return (error);
#endif
	nblks = sp->sw_nblks;

	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
	    nblks + nswap_lowat) {
		return (ENOMEM);
	}

	/*
	 * Prevent further allocations on this device.
	 */
	mtx_lock(&sw_dev_mtx);
	sp->sw_flags |= SW_CLOSING;
	for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
		swap_pager_avail -= blist_fill(sp->sw_blist,
		    dvbase, dmmax);
	}
	swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
	mtx_unlock(&sw_dev_mtx);

	/*
	 * Page in the contents of the device and close it.
	 */
	swap_pager_swapoff(sp);

	sp->sw_close(curthread, sp);
	sp->sw_id = NULL;
	mtx_lock(&sw_dev_mtx);
	TAILQ_REMOVE(&swtailq, sp, sw_list);
	nswapdev--;
	if (nswapdev == 0) {
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	if (swdevhd == sp)
		swdevhd = NULL;
	mtx_unlock(&sw_dev_mtx);
	blist_destroy(sp->sw_blist);
	free(sp, M_VMPGDATA);
	return (0);
}

void
swapoff_all(void)
{
	struct swdevt *sp, *spt;
	const char *devname;
	int error;

	mtx_lock(&Giant);
	while (swdev_syscall_active)
		tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
	swdev_syscall_active = 1;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
		mtx_unlock(&sw_dev_mtx);
		if (vn_isdisk(sp->sw_vp, NULL))
			devname = sp->sw_vp->v_rdev->si_name;
		else
			devname = "[file]";
		error = swapoff_one(sp, thread0.td_ucred);
		if (error != 0) {
			printf("Cannot remove swap device %s (error=%d), "
			    "skipping.\n", devname, error);
		} else if (bootverbose) {
			printf("Swap device %s removed.\n", devname);
		}
		mtx_lock(&sw_dev_mtx);
	}
	mtx_unlock(&sw_dev_mtx);

	swdev_syscall_active = 0;
	wakeup_one(&swdev_syscall_active);
	mtx_unlock(&Giant);
}

void
swap_pager_status(int *total, int *used)
{
	struct swdevt *sp;

	*total = 0;
	*used = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		*total += sp->sw_nblks;
		*used += sp->sw_used;
	}
	mtx_unlock(&sw_dev_mtx);
}

static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int error, n;
	struct xswdev xs;
	struct swdevt *sp;

	if (arg2 != 1)			/* name length */
		return (EINVAL);

	n = 0;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (n == *name) {
			mtx_unlock(&sw_dev_mtx);
			xs.xsw_version = XSWDEV_VERSION;
			xs.xsw_dev = sp->sw_dev;
			xs.xsw_flags = sp->sw_flags;
			xs.xsw_nblks = sp->sw_nblks;
			xs.xsw_used = sp->sw_used;

			error = SYSCTL_OUT(req, &xs, sizeof(xs));
			return (error);
		}
		n++;
	}
	mtx_unlock(&sw_dev_mtx);
	return (ENOENT);
}

SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
    "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
    "Swap statistics by device");
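/*
 * Illustrative sketch (not part of this file's logic): how a userland
 * program might walk the vm.swap_info node exported above, one device
 * index at a time, until the handler returns ENOENT.  This is roughly
 * what swapinfo(8) does via kvm_getswapinfo(3); the snippet assumes
 * struct xswdev is available from <vm/swap_pager.h>.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <vm/swap_pager.h>
#include <stdio.h>

static void
print_swap_devices(void)
{
	struct xswdev xsw;
	size_t mibsize, size;
	int mib[16], n;

	mibsize = sizeof(mib) / sizeof(mib[0]) - 1;
	if (sysctlnametomib("vm.swap_info", mib, &mibsize) == -1)
		return;
	for (n = 0; ; n++) {
		mib[mibsize] = n;	/* device index is the last component */
		size = sizeof(xsw);
		if (sysctl(mib, mibsize + 1, &xsw, &size, NULL, 0) == -1)
			break;		/* ENOENT: ran past the last device */
		printf("device %d: %d blocks, %d used\n",
		    n, xsw.xsw_nblks, xsw.xsw_used);
	}
}
#endif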
/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *			  vmspace.
 *
 *	The map must be locked.
 *
 *	Swap usage is determined by taking the proportional swap used by
 *	VM objects backing the VM map.  To make up for fractional losses,
 *	if the VM object has any swap use at all the associated map entries
 *	count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
	vm_map_t map = &vmspace->vm_map;
	vm_map_entry_t cur;
	int count = 0;

	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		vm_object_t object;

		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
		    (object = cur->object.vm_object) != NULL) {
			VM_OBJECT_LOCK(object);
			if (object->type == OBJT_SWAP &&
			    object->un_pager.swp.swp_bcount != 0) {
				int n = (cur->end - cur->start) / PAGE_SIZE;

				count += object->un_pager.swp.swp_bcount *
				    SWAP_META_PAGES * n / object->size + 1;
			}
			VM_OBJECT_UNLOCK(object);
		}
	}
	return (count);
}

/*
 * GEOM backend
 *
 * Swapping onto disk devices.
 *
 */

static g_orphan_t swapgeom_orphan;

static struct g_class g_swap_class = {
	.name = "SWAP",
	.version = G_VERSION,
	.orphan = swapgeom_orphan,
};

DECLARE_GEOM_CLASS(g_swap_class, g_class);

static void
swapgeom_done(struct bio *bp2)
{
	struct buf *bp;

	bp = bp2->bio_caller2;
	bp->b_ioflags = bp2->bio_flags;
	if (bp2->bio_error)
		bp->b_ioflags |= BIO_ERROR;
	bp->b_resid = bp->b_bcount - bp2->bio_completed;
	bp->b_error = bp2->bio_error;
	bufdone(bp);
	g_destroy_bio(bp2);
}

static void
swapgeom_strategy(struct buf *bp, struct swdevt *sp)
{
	struct bio *bio;
	struct g_consumer *cp;

	cp = sp->sw_id;
	if (cp == NULL) {
		bp->b_error = ENXIO;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}
	if (bp->b_iocmd == BIO_WRITE)
		bio = g_new_bio();
	else
		bio = g_alloc_bio();
	if (bio == NULL) {
		bp->b_error = ENOMEM;
		bp->b_ioflags |= BIO_ERROR;
		bufdone(bp);
		return;
	}

	bio->bio_caller2 = bp;
	bio->bio_cmd = bp->b_iocmd;
	bio->bio_data = bp->b_data;
	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
	bio->bio_length = bp->b_bcount;
	bio->bio_done = swapgeom_done;
	g_io_request(bio, cp);
	return;
}

static void
swapgeom_orphan(struct g_consumer *cp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list)
		if (sp->sw_id == cp)
			sp->sw_id = NULL;
	mtx_unlock(&sw_dev_mtx);
}

static void
swapgeom_close_ev(void *arg, int flags)
{
	struct g_consumer *cp;

	cp = arg;
	g_access(cp, -1, -1, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
swapgeom_close(struct thread *td, struct swdevt *sw)
{

	/* XXX: direct call when Giant untangled */
	g_waitfor_event(swapgeom_close_ev, sw->sw_id, M_WAITOK, NULL);
}
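/*
 * Illustrative sketch (not part of this file's logic): the address
 * translation swapgeom_strategy() performs above.  b_blkno is a page
 * index in the unified swap space; subtracting sw_first rebases it to
 * this device, and multiplying by PAGE_SIZE yields the byte offset the
 * GEOM provider expects.  The numbers are made up for the example.
 */
#if 0
static void
swapgeom_offset_sketch(void)
{
	daddr_t b_blkno = 1234;		/* global swap page number */
	swblk_t sw_first = 1001;	/* this device starts here */
	off_t bio_offset;

	/* Page 1234 is the 233rd page of this device. */
	bio_offset = (off_t)(b_blkno - sw_first) * PAGE_SIZE;
}
#endif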
struct swh0h0 {
	struct cdev *dev;
	struct vnode *vp;
	int	error;
};

static void
swapongeom_ev(void *arg, int flags)
{
	struct swh0h0 *swh;
	struct g_provider *pp;
	struct g_consumer *cp;
	static struct g_geom *gp;
	struct swdevt *sp;
	u_long nblks;
	int error;

	swh = arg;
	swh->error = 0;
	pp = g_dev_getprovider(swh->dev);
	if (pp == NULL) {
		swh->error = ENODEV;
		return;
	}
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		cp = sp->sw_id;
		if (cp != NULL && cp->provider == pp) {
			mtx_unlock(&sw_dev_mtx);
			swh->error = EBUSY;
			return;
		}
	}
	mtx_unlock(&sw_dev_mtx);
	if (gp == NULL)
		gp = g_new_geomf(&g_swap_class, "swap", NULL);
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	/*
	 * XXX: Every time you think you can improve the margin for
	 * footshooting, somebody depends on the ability to do so:
	 * savecore(8) wants to write to our swapdev so we cannot
	 * set an exclusive count :-(
	 */
	error = g_access(cp, 1, 1, 0);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		swh->error = error;
		return;
	}
	nblks = pp->mediasize / DEV_BSIZE;
	swaponsomething(swh->vp, cp, nblks, swapgeom_strategy,
	    swapgeom_close, dev2udev(swh->dev));
	swh->error = 0;
	return;
}

static int
swapongeom(struct thread *td, struct vnode *vp)
{
	int error;
	struct swh0h0 swh;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	swh.dev = vp->v_rdev;
	swh.vp = vp;
	swh.error = 0;
	/* XXX: direct call when Giant untangled */
	error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL);
	if (!error)
		error = swh.error;
	VOP_UNLOCK(vp, 0);
	return (error);
}

/*
 * VNODE backend
 *
 * This is used mainly for network filesystem (read: probably only tested
 * with NFS) swapfiles.
 *
 */

static void
swapdev_strategy(struct buf *bp, struct swdevt *sp)
{
	struct vnode *vp2;

	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);

	vp2 = sp->sw_id;
	vhold(vp2);
	if (bp->b_iocmd == BIO_WRITE) {
		if (bp->b_bufobj)
			bufobj_wdrop(bp->b_bufobj);
		bufobj_wref(&vp2->v_bufobj);
	}
	if (bp->b_bufobj != &vp2->v_bufobj)
		bp->b_bufobj = &vp2->v_bufobj;
	bp->b_vp = vp2;
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);
	return;
}

static void
swapdev_close(struct thread *td, struct swdevt *sp)
{

	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
	vrele(sp->sw_vp);
}

static int
swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
{
	struct swdevt *sp;
	int error;

	if (nblks == 0)
		return (ENXIO);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (sp->sw_id == vp) {
			mtx_unlock(&sw_dev_mtx);
			return (EBUSY);
		}
	}
	mtx_unlock(&sw_dev_mtx);

	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
	error = mac_system_check_swapon(td->td_ucred, vp);
	if (error == 0)
#endif
		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
	(void) VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
	    NODEV);
	return (0);
}
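/*
 * Illustrative sketch (not part of this file's logic): the two unit
 * systems the vnode backend juggles above.  The swap layer tracks
 * blocks in pages, while vnode I/O runs in DEV_BSIZE (512-byte) blocks;
 * with PAGE_SIZE == 4096, one page is 8 device blocks, so ctodb() and
 * dbtoc() multiply and divide by 8.  The values are made up for the
 * example.
 */
#if 0
static void
swap_units_sketch(void)
{
	u_long pages, devblocks;

	pages = 100;			/* 100 pages of swap ... */
	devblocks = ctodb(pages);	/* ... is 800 DEV_BSIZE blocks */
	pages = dbtoc(devblocks);	/* and back again: 100 pages */
}
#endif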