/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	New Swap System
 *	Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 *	from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $Id: swap_pager.c,v 1.117 1999/03/14 09:20:00 julian Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(long)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;
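/*
 * Example: a handle of 0x1230 lands on list ((0x1230 >> 4) & 7) == 3.
 * Every object created with that handle hashes to the same list, so
 * vm_pager_object_lookup() only ever has to walk one short chain.
 */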
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
	swap_pager_alloc __P((void *handle, vm_ooffset_t size,
			      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct buf *));

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */

static void swp_pager_meta_build __P((vm_object_t, daddr_t, daddr_t, int));
static void swp_pager_meta_free __P((vm_object_t, daddr_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck()
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
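/*
 * Hysteresis example: with the default nswap_lowat of 128 and
 * nswap_hiwat of 512, the almost-full warning trips when free swap
 * drops below 128 pages and does not clear again until free swap
 * climbs back above 512 pages, so the console message cannot flap
 * while free swap hovers near a single threshold.
 */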
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init()
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init()
{
	int n;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.
	 */

	n = cnt.v_page_count * 2;

	swap_zone = zinit(
	    "SWAPMETA",
	    sizeof(struct swblock),
	    n,
	    ZONE_INTERRUPT,
	    1
	);

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
		;

	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
	bzero(swhash, sizeof(struct swblock *) * n);

	swhash_mask = n - 1;
}
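/*
 * Sizing example: on a machine with, say, 32768 physical pages the
 * loop above stops at n = 8192, the first power of two that is not
 * less than v_page_count / 4, giving 8192 hash buckets and a
 * swhash_mask of 0x1fff.
 */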
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */

		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(
			    object,
			    0,
			    SWAPBLK_NONE,
			    0
			);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(
		    object,
		    0,
		    SWAPBLK_NONE,
		    0
		);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */

	swp_pager_meta_free_all(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
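/*
 * The two inline routines below are the only routines here that touch
 * the global radix-tree bitmap (swapblist).  They keep vm_swap_size,
 * the free-swap-page count, in step with every allocation and free,
 * and call swp_sizecheck() so the full/almost-full indications stay
 * current.
 */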
/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(npages)
	int npages;
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine could block in the old swap code; through the
 *	use of the new blist routines it no longer does.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(blk, npages)
	daddr_t blk;
	int npages;
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 */

void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory.  For the moment we *may*
 *	have to get a little memory from the zone allocator, but it is taken
 *	from the interrupt memory, so we should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be
 *	locked or inaccessible (XXX are they ?)
 */

void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
	vm_object_t srcobject;
	vm_object_t dstobject;
	vm_pindex_t offset;
	int destroysource;
{
	vm_pindex_t i;

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}
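	/*
	 * The loop below walks the destination's page indices.  For each
	 * index it either keeps the destination's swapblk, in which case
	 * the corresponding source block is freed (SWM_FREE), or it pops
	 * the source's swapblk out of srcobject's metadata without
	 * freeing it (SWM_POP) and hands ownership of the block to
	 * dstobject via swp_pager_meta_build().
	 */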
	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr, 1);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	return;
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */

boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */

	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 & SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk & SWAPBLK_NONE)
				break;
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk & SWAPBLK_NONE)
				break;
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}

	return (TRUE);
}
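/*
 * With SWB_NPAGES at its default of 16, the scans above examine at most
 * 7 pages in each direction, so *before and *after each report between
 * 0 and 7 pages of contiguous backing store around the requested page.
 */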
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 */

static void
swap_pager_unswapped(m)
	vm_page_t m;
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly from and to the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct buf *bp)
{
	vm_pindex_t start;
	int count;
	char *data;
	struct buf *nbp = NULL;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bp);
		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */

	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = bp->b_pblkno;
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	/*
	 * Execute strategy function
	 */

	if (bp->b_flags & B_FREEBUF) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		int s;

		s = splvm();
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->b_resid = 0;
	} else if (bp->b_flags & B_READ) {
		/*
		 * READ FROM SWAP - read directly from swap backing store,
		 * zero-fill as appropriate.
		 *
		 * Note: any nbp still under collection when the loop ends
		 * is flushed by the cleanup code at the bottom.
		 */

		while (count > 0) {
			daddr_t blk;
			int s;

			s = splvm();
			blk = swp_pager_meta_ctl(object, start, 0);
			splx(s);

			/*
			 * Do we have to flush our current collection?
			 */

			if (
			    nbp && (
				(blk & SWAPBLK_NONE) ||
				nbp->b_blkno + btoc(nbp->b_bcount) != blk
			    )
			) {
				++cnt.v_swapin;
				cnt.v_swappgsin += btoc(nbp->b_bcount);
				flushchainbuf(nbp);
				nbp = NULL;
			}

			/*
			 * Add to collection
			 */
			if (blk & SWAPBLK_NONE) {
				s = splbio();
				bp->b_resid -= PAGE_SIZE;
				splx(s);
				bzero(data, PAGE_SIZE);
			} else {
				if (nbp == NULL) {
					nbp = getchainbuf(bp, swapdev_vp, B_READ|B_ASYNC);
					nbp->b_blkno = blk;
					nbp->b_data = data;
				}
				nbp->b_bcount += PAGE_SIZE;
			}
			--count;
			++start;
			data += PAGE_SIZE;
		}
	} else {
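		/*
		 * Stripe example: with SWB_NPAGES == 16, dmmax is 32 and
		 * dmmax_mask is ~31.  If the allocator below returns
		 * blk = 28 for n = 8 pages, the run would cross the stripe
		 * boundary at 32, so j = ((28 + 32) & ~31) - 28 = 4 pages
		 * are kept and the 4-page tail is freed right back.
		 */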
		/*
		 * WRITE TO SWAP - [re]allocate swap and write.
		 */
		while (count > 0) {
			int i;
			int s;
			int n;
			daddr_t blk;

			n = min(count, BLIST_MAX_ALLOC);
			n = min(n, nsw_cluster_max);

			s = splvm();
			for (;;) {
				blk = swp_pager_getswapspace(n);
				if (blk != SWAPBLK_NONE)
					break;
				n >>= 1;
				if (n == 0)
					break;
			}
			if (n == 0) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				splx(s);
				break;
			}

			/*
			 * Oops, too big if it crosses a stripe
			 *
			 * 1111000000
			 *     111111
			 *	  1000001
			 */
			if ((blk ^ (blk + n)) & dmmax_mask) {
				int j = ((blk + dmmax) & dmmax_mask) - blk;
				swp_pager_freeswapspace(blk + j, n - j);
				n = j;
			}

			swp_pager_meta_free(object, start, n);

			splx(s);

			if (nbp) {
				++cnt.v_swapout;
				cnt.v_swappgsout += btoc(nbp->b_bcount);
				flushchainbuf(nbp);
			}

			nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);

			nbp->b_blkno = blk;
			nbp->b_data = data;
			nbp->b_bcount = PAGE_SIZE * n;

			/*
			 * Must set dirty range for NFS to work.  b_dirtyoff
			 * is already 0.
			 */
			nbp->b_dirtyend = nbp->b_bcount;

			++cnt.v_swapout;
			cnt.v_swappgsout += n;

			s = splbio();
			for (i = 0; i < n; ++i) {
				swp_pager_meta_build(
				    object,
				    start + i,
				    blk + i,
				    1
				);
			}
			splx(s);

			count -= n;
			start += n;
			data += PAGE_SIZE * n;
		}
	}

	/*
	 * Cleanup.  Commit last nbp either async or sync, and either
	 * wait for it synchronously or make it auto-biodone itself and
	 * the parent bp.
	 */

	if (nbp) {
		if ((bp->b_flags & B_ASYNC) == 0)
			nbp->b_flags &= ~B_ASYNC;
		if (nbp->b_flags & B_READ) {
			++cnt.v_swapin;
			cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++cnt.v_swapout;
			cnt.v_swappgsout += btoc(nbp->b_bcount);
		}
		flushchainbuf(nbp);
	}
	if (bp->b_flags & B_ASYNC) {
		autochaindone(bp);
	} else {
		waitchainbuf(bp, 0, 1);
	}
}
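/*
 * A note on the chained-buffer helpers used above, as their usage here
 * suggests: getchainbuf() hangs a child buffer off the caller's bp,
 * flushchainbuf() launches a child at the driver, and waitchainbuf() /
 * autochaindone() account for the children and biodone() the parent
 * once everything has completed.
 */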
/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count, reqpage;
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

#if !defined(MAX_PERF)
	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
#endif
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.
	 */

	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (iblk & SWAPBLK_NONE)
			break;

		if ((blk ^ iblk) & dmmax_mask)
			break;

		if (blk != iblk + (reqpage - i))
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (jblk & SWAPBLK_NONE)
			break;

		if ((blk ^ jblk) & dmmax_mask)
			break;

		if (blk != jblk - (j - reqpage))
			break;
	}

	/*
	 * If blk itself is bad, well, we can't do any I/O.  This should
	 * already be covered as a side effect, but I'm making sure.
	 */

	if (blk & SWAPBLK_NONE) {
		i = reqpage;
		j = reqpage + 1;
	}

	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k) {
			vm_page_free(m[k]);
		}
		for (k = j; k < count; ++k) {
			vm_page_free(m[k]);
		}
	}

	/*
	 * Return VM_PAGER_FAIL if we have nothing
	 * to do.  Return mreq still busy, but the
	 * others unbusied.
	 */

	if (blk & SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_flags = B_BUSY | B_READ | B_CALL;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
	bp->b_data = (caddr_t) kva;
	crhold(bp->b_rcred);
	crhold(bp->b_wcred);
	/*
	 * b_blkno is in page-sized chunks.  swapblk is valid, too, so
	 * we don't have to mask it against SWAPBLK_MASK.
	 */
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
	 */

	VOP_STRATEGY(bp->b_vp, bp);
	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	s = splvm();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (tsleep(mreq, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
			    " %#lx, blkno: %ld, size: %ld\n",
			    (u_long)bp->b_dev, (long)bp->b_blkno,
			    (long)bp->b_bcount
			);
		}
	}

	splx(s);

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		mreq->object->last_read = lastpindex;
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	int n = 0;

#if !defined(MAX_PERF)
	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
#endif
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP) {
		swp_pager_meta_build(object, 0, SWAPBLK_NONE, 0);
	}

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;
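		/*
		 * Example: setting vm.swap_async_max to 1000 on a system
		 * where nswbuf is, say, 256 is clamped to 128 here, while
		 * a setting of 0 is raised to 1.
		 */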
		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j) {
				rtvals[i+j] = VM_PAGER_FAIL;
			}
			continue;
		}

		/*
		 * Oops, too big if it crosses a stripe
		 *
		 * 1111000000
		 *     111111
		 *	  1000001
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
			bp->b_flags = B_BUSY | B_CALL;
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_BUSY | B_CALL | B_ASYNC;
		}
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		crhold(bp->b_rcred);
		crhold(bp->b_wcred);

		pbgetvp(swapdev_vp, bp);

		s = splvm();

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j,
			    0
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_pages[j] = mreq;
		}
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;
		swapdev_vp->v_numoutput++;

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			VOP_STRATEGY(bp->b_vp, bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;

			splx(s);
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		VOP_STRATEGY(bp->b_vp, bp);
		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, PVM, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);

		splx(s);
	}
}

/*
 *	swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.
 */

static void
swp_pager_sync_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	WARNING!  This routine may be called from an interrupt.  We cannot
 *	mess with swap metadata unless we want to run all our other routines
 *	at splbio() too, which I'd rather not do.  We up ourselves
 *	to splvm() because we may call vm_page_free(), which can unlink a
 *	page from an object.
 *
 *	XXX currently I do not believe any object routines protect
 *	object->memq at splvm().  The code must be gone over to determine
 *	the actual state of the problem.
 *
 *	For READ operations, the pages are PG_BUSY'd; we unbusy all pages
 *	except the 'main' request page.  For WRITE operations, the pages
 *	are soft-busied (vm_page_t->busy); we unbusy all of them ( we can
 *	do this because we marked them all VM_PAGER_PEND on return from
 *	putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio()
 */

static void
swp_pager_async_iodone(bp)
	register struct buf *bp;
{
	int s;
	int i;
	vm_object_t object = NULL;

	s = splvm();

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_flags & B_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
		    "size %ld, error %d\n",
		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object.
	 */

	if (bp->b_npages)
		object = bp->b_pages[0]->object;

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the swap bitmap.  The erroneous block(s)
	 * are thus never reallocated as swap.  Redirty the page and
	 * continue.
	 */

	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_flags & B_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX it may not be legal to free the page
				 * here as this messes with the object->memq's.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_flags & B_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiter's up ).
			 */
			vm_page_protect(m, VM_PROT_READ);
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->dirty = 0;
			vm_page_io_finish(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_npages);

	/*
	 * release the physical I/O buffer
	 */
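	/*
	 * The pbuf goes back to whichever counter it was taken from:
	 * nsw_rcount for reads, nsw_wcount_async or nsw_wcount_sync for
	 * writes.  relpbuf() also wakes up anyone throttled waiting for
	 * a buffer on that counter.
	 */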
	relpbuf(
	    bp,
	    ((bp->b_flags & B_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.
 *
 *	In fact, we just have a few counters in the vm_object_t.  The
 *	metadata is actually stored in a hash table.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock structure, or a pointer to a NULL pointer if it
 *	could not find a swapblk.
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, daddr_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(long)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	daddr_t index,
	daddr_t swapblk,
	int waitok
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Wait for free memory when waitok is TRUE prior to calling the
	 * zone allocator.
	 */

	while (waitok && cnt.v_free_count == 0) {
		VM_WAIT;
	}

	/*
	 * If the swapblk being added is invalid, free the underlying
	 * block (if any) and record SWAPBLK_NONE instead.
	 */

	if (swapblk & SWAPBLK_NONE) {
		if (swapblk != SWAPBLK_NONE) {
			swp_pager_freeswapspace(
			    swapblk & SWAPBLK_MASK,
			    1
			);
			swapblk = SWAPBLK_NONE;
		}
	}
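	/*
	 * Granularity example: assuming SWAP_META_PAGES covers 16 pages,
	 * as the zone sizing comment above suggests, indices 0..15 of an
	 * object share a single swblock (the index is masked down to 0),
	 * so one zone allocation tracks up to 16 swapblk assignments.
	 */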
	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.
	 */

	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);

		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(
		    swap->swb_pages[index] & SWAPBLK_MASK,
		    1
		);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, daddr_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			daddr_t n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
#if !defined(MAX_PERF)
					--swap->swb_count;
#endif
					swp_pager_freeswapspace(
					    v,
					    1
					);
				}
			}
#if !defined(MAX_PERF)
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
#endif
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
#if !defined(MAX_PERF)
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
#endif
	}
}

/*
 * SWP_PAGER_META_CTL() -	misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (
	    object->type != OBJT_SWAP ||
	    object->un_pager.swp.swp_bcount == 0
	) {
		return(SWAPBLK_NONE);
	}

	{
		struct swblock **pswap;
		struct swblock *swap;
		daddr_t r1 = SWAPBLK_NONE;

		pswap = swp_pager_hash(object, index);

		index &= SWAP_META_MASK;

		if ((swap = *pswap) != NULL) {
			r1 = swap->swb_pages[index];

			if (r1 != SWAPBLK_NONE) {
				if (flags & SWM_FREE) {
					swp_pager_freeswapspace(
					    r1,
					    1
					);
					r1 = SWAPBLK_NONE;
				}
				if (flags & (SWM_FREE|SWM_POP)) {
					swap->swb_pages[index] = SWAPBLK_NONE;
					if (--swap->swb_count == 0) {
						*pswap = swap->swb_hnext;
						zfree(swap_zone, swap);
						--object->un_pager.swp.swp_bcount;
					}
				}
			}
		}

		return(r1);
	}
	/* not reached */
}