/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 *	from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static struct sx sw_alloc_sx;

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
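
/*
 * Illustrative note (not part of the original code): NOBJLIST() discards
 * the low four handle bits (handles are pointer-aligned, so they carry
 * no information) and uses the next three bits to pick one of the eight
 * lists.  For example, a hypothetical handle value of 0x1a0 hashes to
 * ((0x1a0 >> 4) & 7) == 2, i.e. &swap_pager_object_list[2].
 */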
static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
				      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct bio *));

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
static void flushchainbuf(struct buf *nbp);
static void waitchainbuf(struct bio *bp, int count, int done);

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */

static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck()
{
	GIANT_REQUIRED;

	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
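
/*
 * Worked example (illustrative only): with the default nswap_lowat = 128
 * and nswap_hiwat = 512, the first allocation that drops vm_swap_size
 * below 128 pages prints the warning and latches swap_pager_almost_full.
 * The flag is not cleared the moment we climb back over 128; frees must
 * push vm_swap_size above 512 pages first.  The dead zone between the
 * two watermarks is what prevents a console flood when the system hovers
 * near exhaustion.
 */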
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init()
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);
	mtx_init(&sw_alloc_mtx, "swap_pager list", MTX_DEF);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init()
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap devices, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.
	 */

	n = min(cnt.v_page_count,
	    (kernel_map->max_offset - kernel_map->min_offset) / PAGE_SIZE) * 2;
	n2 = n;

	while (n > 0 &&
	    (swap_zone = zinit("SWAPMETA", sizeof(struct swblock), n,
	    ZONE_INTERRUPT, 1)) == NULL)
		n >>= 1;
	if (swap_zone == NULL)
		printf("WARNING: failed to init swap_zone!\n");
	if (n2 != n)
		printf("Swap zone entries reduced to %d.\n", n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < n2; n <<= 1)
		;

	swhash = malloc(sizeof(struct swblock *) * n,
	    M_VMPGDATA, M_WAITOK | M_ZERO);

	swhash_mask = n - 1;
}
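
/*
 * Illustrative note (not part of the original code): the hash-sizing
 * loop in swap_pager_swap_init() rounds n up to the smallest power of
 * two that is >= n2, which keeps (index & swhash_mask) a valid bucket
 * index.  For example, if the zone ended up with n2 = 5000 entries,
 * the table is sized to 8192 buckets and swhash_mask becomes 0x1fff.
 */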
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	GIANT_REQUIRED;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}
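
/*
 * Usage sketch (illustrative only, not a call site in this file): the
 * generic pager layer reaches this routine through swappagerops, so a
 * hypothetical caller creating a one-megabyte anonymous swap object
 * would do something like:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_SWAP, NULL, 1024 * 1024,
 *	    VM_PROT_ALL, 0);
 *
 * Passing a NULL handle takes the unnamed branch above; a non-NULL
 * handle makes repeated calls return references to the same object.
 */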
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine formerly could block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	int s;

	GIANT_REQUIRED;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	mtx_lock(&sw_alloc_mtx);
	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object,
		    pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
	}
	mtx_unlock(&sw_alloc_mtx);

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(npages)
	int npages;
{
	daddr_t blk;

	GIANT_REQUIRED;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		/* per-swap area stats */
		swdevt[BLK2DEVIDX(blk)].sw_used += npages;
		swp_sizecheck();
	}
	return (blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(blk, npages)
	daddr_t blk;
	int npages;
{
	GIANT_REQUIRED;

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	/* per-swap area stats */
	swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	int s = splvm();

	GIANT_REQUIRED;
	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) ==
			    SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
					    start - beg);
					splx(s);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	splx(s);
	return (0);
}
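
/*
 * Usage sketch (illustrative only): a caller that wants guaranteed
 * backing store for the first 16 pages of an object, e.g. for a
 * hypothetical fixed-size swap-backed buffer, might do:
 *
 *	if (swap_pager_reserve(object, 0, 16) != 0)
 *		return (ENOMEM);
 *
 * On failure the routine has already released any blocks it assigned,
 * so no cleanup is required.
 */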
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
	vm_object_t srcobject;
	vm_object_t dstobject;
	vm_pindex_t offset;
	int destroysource;
{
	vm_pindex_t i;
	int s;

	GIANT_REQUIRED;

	s = splvm();
	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		mtx_lock(&sw_alloc_mtx);
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(&swap_pager_un_object_list, srcobject,
			    pager_object_list);
		} else {
			TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
			    pager_object_list);
		}
		mtx_unlock(&sw_alloc_mtx);
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(srcobject,
			    i + offset, SWM_POP);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not
	 * accidentally double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	daddr_t blk0;
	int s;

	/*
	 * do we have good backing store at the requested index ?
	 */

	s = splvm();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		splx(s);
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	splx(s);
	return (TRUE);
}
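
/*
 * Worked example (illustrative only): suppose pages 10..13 of an object
 * were paged out together and hold swap blocks 100..103.  A call
 *
 *	swap_pager_haspage(object, 12, &before, &after);
 *
 * returns TRUE with before == 2 and after == 1 (capped by SWB_NPAGES/2),
 * telling getpages how large a contiguous cluster it may read around
 * the requested page.
 */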
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */

static void
swap_pager_unswapped(m)
	vm_page_t m;
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct bio *bp)
{
	vm_pindex_t start;
	int count;
	int s;
	char *data;
	struct buf *nbp = NULL;

	GIANT_REQUIRED;

	/* XXX: KASSERT instead ? */
	if (bp->bio_bcount & PAGE_MASK) {
		biofinish(bp, NULL, EINVAL);
		printf("swap_pager_strategy: bp %p blk %d size %d, "
		    "not page bounded\n",
		    bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */

	bp->bio_error = 0;
	bp->bio_flags &= ~BIO_ERROR;
	bp->bio_resid = bp->bio_bcount;

	start = bp->bio_pblkno;
	count = howmany(bp->bio_bcount, PAGE_SIZE);
	data = bp->bio_data;

	s = splvm();

	/*
	 * Deal with BIO_DELETE
	 */

	if (bp->bio_cmd == BIO_DELETE) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->bio_resid = 0;
		biodone(bp);
		return;
	}

	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->bio_error = ENOMEM;
				bp->bio_flags |= BIO_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			splx(s);
			if (bp->bio_cmd == BIO_READ) {
				++cnt.v_swapin;
				cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++cnt.v_swapout;
				cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			s = splvm();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */

		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->bio_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */

	splx(s);

	if (nbp) {
		if (nbp->b_iocmd == BIO_READ) {
			++cnt.v_swapin;
			cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++cnt.v_swapout;
			cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
		/* nbp = NULL; */
	}
	/*
	 * Wait for completion.
	 */

	waitchainbuf(bp, 0, 1);
}
/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count, reqpage;
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	GIANT_REQUIRED;

	mreq = m[reqpage];

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object, mreq->object);
	}
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */

	s = splvm();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	splx(s);

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return (VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
	bp->b_data = (caddr_t) kva;
	crhold(bp->b_rcred);
	crhold(bp->b_wcred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */
	BUF_KERNPROC(bp);
	BUF_STRATEGY(bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	s = splvm();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (tsleep(mreq, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
			    " %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    bp->b_bcount
			);
		}
	}

	splx(s);

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return (VM_PAGER_ERROR);
	} else {
		return (VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}
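
/*
 * Worked example (illustrative only): with count = 5, reqpage = 2 and
 * swapblks {99, 100, 101, 102, SWAPBLK_NONE} behind m[0..4], the
 * backward scan keeps m[0] and m[1] (contiguous with blk 101), the
 * forward scan keeps m[3] and stops at m[4], so i = 0 and j = 4:
 * m[4] is freed and a single 4-page read is issued starting at
 * block 99.
 */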
/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	int n = 0;

	GIANT_REQUIRED;
	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object, m[0]->object);
	}
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	mtx_lock(&pbuf_mtx);
	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}
	mtx_unlock(&pbuf_mtx);
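
	/*
	 * Worked example of the adjustment above (illustrative only): if
	 * the sysop raises vm.swap_async_max from 4 to 8, n becomes
	 * 8 - 4 = 4 and, provided the semaphore-like nsw_wcount_async
	 * count does not go negative, both the available count and the
	 * recorded maximum are bumped by 4 and any sleepers are woken.
	 * A reduction is deferred until enough in-flight writes complete
	 * for the count to absorb the negative delta.
	 */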

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		s = splvm();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			splx(s);
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}
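
		/*
		 * Worked example of the trim above (illustrative only):
		 * with dmmax = 32, an 8-page allocation starting at
		 * blk = 30 spans the stripe boundary at 32.  Then
		 * j = ((30 + 32) & ~31) - 30 = 2, blocks 32..37 are
		 * returned to the bitmap, and the I/O is clipped to the
		 * two pages that fit below the boundary.
		 */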

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_ASYNC;
		}
		bp->b_iocmd = BIO_WRITE;
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		crhold(bp->b_rcred);
		crhold(bp->b_wcred);

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(mreq->object, mreq->pindex,
			    blk + j);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_pages[j] = mreq;
		}
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;
		swapdev_vp->v_numoutput++;

		splx(s);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			BUF_STRATEGY(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			/* restart outer loop */
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		BUF_STRATEGY(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		s = splbio();

		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, PVM, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);
		splx(s);
	}
}

/*
 *	swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}
/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */

static void
swp_pager_async_iodone(bp)
	struct buf *bp;
{
	int s;
	int i;
	vm_object_t object = NULL;

	GIANT_REQUIRED;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_ioflags & BIO_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_npages)
		object = bp->b_pages[0]->object;
	s = splvm();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_ioflags & BIO_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_iocmd == BIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
				 * AS THIS MESSES WITH object->memq, and it is
				 * not legal to mess with object->memq from an
				 * interrupt.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(m);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiter's up ).
			 */
			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio()
 *	covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock entry, or a pointer to a NULL pointer if it could
 *	not find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return (pswap);
}
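
/*
 * Worked example (illustrative only): each swblock covers
 * SWAP_META_PAGES pages, so a lookup of page index 37 with, say,
 * SWAP_META_MASK == 15 first rounds the index down to 32 and then
 * hashes (32 ^ object-pointer) into swhash.  All of pages 32..47 of
 * that object share the one swblock, whose swb_pages[37 & 15], i.e.
 * swb_pages[5], holds the block for page 37.
 */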
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, daddr_t swapblk)
{
	struct swblock *swap;
	struct swblock **pswap;

	GIANT_REQUIRED;
	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		mtx_lock(&sw_alloc_mtx);
		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
			    object, pager_object_list);
		} else {
			TAILQ_INSERT_TAIL(&swap_pager_un_object_list,
			    object, pager_object_list);
		}
		mtx_unlock(&sw_alloc_mtx);
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			VM_WAIT;
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	GIANT_REQUIRED;

	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	GIANT_REQUIRED;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: "
				    "swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: "
			    "failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free -- pop it out
 */

static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	GIANT_REQUIRED;
	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return (SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return (r1);
}
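
/*
 * Usage sketch (illustrative only): the three modes look like this at a
 * call site.  A plain lookup leaves the metadata untouched, SWM_POP
 * transfers ownership of the block to the caller (swap_pager_copy()
 * uses this to move blocks between objects), and SWM_FREE both removes
 * the assignment and returns the block to the bitmap:
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);		// lookup
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);	// steal
 *	(void) swp_pager_meta_ctl(object, pindex, SWM_FREE);	// free
 */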
/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */

/*
 *	vm_pager_chain_iodone:
 *
 *	io completion routine for child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be lost.
 */

static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct bio *bp;
	u_int *count;

	bp = nbp->b_caller1;
	if (bp != NULL) {
		count = (u_int *)&(bp->bio_caller1);
		if (nbp->b_ioflags & BIO_ERROR) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = nbp->b_error;
		} else if (nbp->b_resid != 0) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = EINVAL;
		} else {
			bp->bio_resid -= nbp->b_bcount;
		}
		nbp->b_caller1 = NULL;
		--(*count);
		if (bp->bio_flags & BIO_FLAG1) {
			bp->bio_flags &= ~BIO_FLAG1;
			wakeup(bp);
		}
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 *	getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
 *	automatically propagated to the parent.
 */

struct buf *
getchainbuf(struct bio *bp, struct vnode *vp, int flags)
{
	struct buf *nbp;
	u_int *count;

	GIANT_REQUIRED;
	nbp = getpbuf(NULL);
	count = (u_int *)&(bp->bio_caller1);

	nbp->b_caller1 = bp;
	++(*count);

	if (*count > 4)
		waitchainbuf(bp, 4, 0);

	nbp->b_iocmd = bp->bio_cmd;
	nbp->b_ioflags = bp->bio_flags & BIO_ORDERED;
	nbp->b_flags = flags;
	nbp->b_rcred = nbp->b_wcred = proc0.p_ucred;
	nbp->b_iodone = vm_pager_chain_iodone;

	crhold(nbp->b_rcred);
	crhold(nbp->b_wcred);

	if (vp)
		pbgetvp(vp, nbp);
	return (nbp);
}

void
flushchainbuf(struct buf *nbp)
{
	GIANT_REQUIRED;
	if (nbp->b_bcount) {
		nbp->b_bufsize = nbp->b_bcount;
		if (nbp->b_iocmd == BIO_WRITE)
			nbp->b_dirtyend = nbp->b_bcount;
		BUF_KERNPROC(nbp);
		BUF_STRATEGY(nbp);
	} else {
		bufdone(nbp);
	}
}

static void
waitchainbuf(struct bio *bp, int limit, int done)
{
	int s;
	u_int *count;

	GIANT_REQUIRED;
	count = (u_int *)&(bp->bio_caller1);
	s = splbio();
	while (*count > limit) {
		bp->bio_flags |= BIO_FLAG1;
		tsleep(bp, PRIBIO + 4, "bpchain", 0);
	}
	if (done) {
		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = EINVAL;
		}
		biodone(bp);
	}
	splx(s);
}
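
/*
 * Usage sketch (illustrative only): swap_pager_strategy() above is the
 * consumer of this interface in this file and shows the intended
 * pattern.  A parent struct bio fans out into child pbufs:
 *
 *	nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);	// child, chained
 *	nbp->b_blkno = blk;				// fill in the I/O
 *	nbp->b_bcount += PAGE_SIZE;
 *	flushchainbuf(nbp);				// start (or bufdone)
 *	...
 *	waitchainbuf(bp, 0, 1);				// drain and biodone
 *
 * getchainbuf() itself throttles a caller to four outstanding children
 * by sleeping in waitchainbuf(bp, 4, 0).
 */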