/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	New Swap System
 *	Matthew Dillon
 *
 *	Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 *	Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

static struct vnode *swapdev_vp;	/* XXX: This is not quite a real vnode */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
vm_zone_t		swap_zone;
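
/*
 * Example of the NOBJLIST() hash: handles are typically pointer values,
 * so the macro discards the low four alignment bits and folds the rest
 * into one of the NOBJLISTS buckets.  A hypothetical handle of 0x1230
 * maps to list ((0x1230 >> 4) & 7) == (0x123 & 7) == 3.
 */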
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
	swap_pager_alloc __P((void *handle, vm_ooffset_t size,
			      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct buf *));

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old system.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */

static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */

static __inline void
swp_sizecheck()
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
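
/*
 * Example of the hysteresis: with the default nswap_lowat == 128 and
 * nswap_hiwat == 512, the "out of swap space" warning fires when free
 * swap drops below 128 pages, and the almost-full state is not cleared
 * again until free swap climbs back above 512 pages.  This keeps the
 * console from being spammed as vm_swap_size oscillates near the mark.
 */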
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */

static void
swap_pager_init()
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */

	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */

void
swap_pager_swap_init()
{
	int n;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold 16 pages, so this is probably overkill.
	 */

	n = cnt.v_page_count * 2;

	swap_zone = zinit(
	    "SWAPMETA",
	    sizeof(struct swblock),
	    n,
	    ZONE_INTERRUPT,
	    1
	);

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */

	for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
		;

	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
	bzero(swhash, sizeof(struct swblock *) * n);

	swhash_mask = n - 1;

	n = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &swapdev_vp);
	if (n)
		panic("Cannot get vnode for swapdev");
	swapdev_vp->v_type = VBLK;
}
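
/*
 * Example of the hash table sizing loop in swap_pager_swap_init(): on a
 * machine with cnt.v_page_count == 8192, n doubles from 1 until it
 * reaches 8192 / 4 == 2048, giving a 2048-entry table and
 * swhash_mask == 2047.  The table size is thus always a power of 2,
 * which is what makes the '& swhash_mask' indexing in swp_pager_hash()
 * valid.
 */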
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */

		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);

		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	int s;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */

	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	s = splvm();
	swp_pager_meta_free_all(object);
	splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
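
/*
 * The two routines below are thin wrappers around the radix bitmap
 * allocator described in the file header.  A sketch of the underlying
 * interface as used here (see sys/blist.h for the authoritative API):
 *
 *	daddr_t blk;
 *
 *	blk = blist_alloc(swapblist, npages);
 *		(contiguous run of npages, or SWAPBLK_NONE on failure)
 *	blist_free(swapblist, blk, npages);
 *		(return the run to the bitmap)
 *
 * The wrappers add the vm_swap_size accounting and the low/high water
 * checks via swp_sizecheck().
 */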
/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block.
 *	This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(npages)
	int npages;
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			printf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block.
 *	This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(blk, npages)
	daddr_t blk;
	int npages;
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	int s = splvm();
	swp_pager_meta_free(object, start, size);
	splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is
 *	destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int s;
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	s = splvm();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) ==
			    SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
					    start - beg);
					splx(s);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	splx(s);
	return(0);
}
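
/*
 * Example of the allocation fallback in swap_pager_reserve(): a large
 * request first tries chunks of BLIST_MAX_ALLOC pages.  If swap is too
 * fragmented for a run of that size, the chunk size is halved
 * (BLIST_MAX_ALLOC, /2, /4, ...) until an allocation succeeds or the
 * size reaches 0, at which point everything assigned so far
 * ([beg, start)) is backed out and -1 is returned.
 */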
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator.  It is
 *	taken from the interrupt memory, so we should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well).
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */

void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
	vm_object_t srcobject;
	vm_object_t dstobject;
	vm_pindex_t offset;
	int destroysource;
{
	vm_pindex_t i;
	int s;

	s = splvm();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */

	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */

	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */

		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not
	 * accidentally double-remove the object from the swap queues.
	 */

	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	splx(s);
}
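
/*
 * Example of the destination-wins rule above: if dstobject index i has
 * no swap assignment, the source block at index i + offset is popped
 * (SWM_POP) out of srcobject's metadata and built into dstobject,
 * transferring ownership without any I/O.  If dstobject already has a
 * block (or, per the comments above, a resident page) at index i, the
 * now-redundant source block is simply freed with SWM_FREE.
 */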
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 *
 *	This routine must be called at splvm().
 */

boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */

	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */

	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */

	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}

	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block.
 *	This routine must be called at splvm().
 */

static void
swap_pager_unswapped(m)
	vm_page_t m;
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
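
/*
 * A note on the device stripe checks used below and in getpages/
 * putpages: swap space is interleaved across devices in runs of dmmax
 * pages, and a single I/O must not cross a stripe boundary.  Two block
 * numbers lie in the same stripe exactly when
 * ((blk1 ^ blk2) & dmmax_mask) == 0.  For example, with the default
 * SWB_NPAGES == 16 we get dmmax == 32 and dmmax_mask == ~31, so blocks
 * 30 and 34 differ within the masked high bits
 * ((30 ^ 34) & ~31 == 32) and therefore fall in different stripes.
 */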
/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	Note that b_blkno is scaled for PAGE_SIZE.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct buf *bp)
{
	vm_pindex_t start;
	int count;
	int s;
	char *data;
	struct buf *nbp = NULL;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bp);
		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */

	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = bp->b_pblkno;
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	s = splvm();

	/*
	 * Deal with B_FREEBUF
	 */

	if (bp->b_flags & B_FREEBUF) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 * needed.
		 */
		swp_pager_meta_free(object, start, count);
		splx(s);
		bp->b_resid = 0;
		biodone(bp);
		return;
	}

	/*
	 * Execute read or write
	 */

	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */

		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */

		if (
		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
		     ((nbp->b_blkno ^ blk) & dmmax_mask)
		    )
		) {
			splx(s);
			if (bp->b_flags & B_READ) {
				++cnt.v_swapin;
				cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++cnt.v_swapout;
				cnt.v_swappgsout += btoc(nbp->b_bcount);
				nbp->b_dirtyend = nbp->b_bcount;
			}
			flushchainbuf(nbp);
			s = splvm();
			nbp = NULL;
		}

		/*
		 * Add new swapblk to nbp, instantiating nbp if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */

		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (nbp == NULL) {
				nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC);
				nbp->b_blkno = blk;
				nbp->b_bcount = 0;
				nbp->b_data = data;
			}
			nbp->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 * Flush out last buffer
	 */

	splx(s);

	if (nbp) {
		if ((bp->b_flags & B_ASYNC) == 0)
			nbp->b_flags &= ~B_ASYNC;
		if (nbp->b_flags & B_READ) {
			++cnt.v_swapin;
			cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++cnt.v_swapout;
			cnt.v_swappgsout += btoc(nbp->b_bcount);
			nbp->b_dirtyend = nbp->b_bcount;
		}
		flushchainbuf(nbp);
		/* nbp = NULL; */
	}

	/*
	 * Wait for completion.
	 */

	if (bp->b_flags & B_ASYNC) {
		autochaindone(bp);
	} else {
		waitchainbuf(bp, 0, 1);
	}
}
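
/*
 * Example of the chain-flush test in swap_pager_strategy(): if the
 * buffer under construction has nbp->b_blkno == 100 and
 * nbp->b_bcount == 3 * PAGE_SIZE, then btoc(nbp->b_bcount) == 3 and
 * the next block must be exactly 103 (and in the same dmmax stripe)
 * for the run to keep growing; any other value flushes nbp and starts
 * a fresh chain buffer at the new block.
 */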
/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */

static int
swap_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count, reqpage;
{
	struct buf *bp;
	vm_page_t mreq;
	int s;
	int i;
	int j;
	daddr_t blk;
	vm_offset_t kva;
	vm_pindex_t lastpindex;

	mreq = m[reqpage];

#if !defined(MAX_PERF)
	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
#endif
	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range that falls entirely
	 * within a single device stripe.  If we do not supply it, bad things
	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
	 * loops are set up such that the case(s) are handled implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */

	s = splvm();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

	for (i = reqpage - 1; i >= 0; --i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
		if (blk != iblk + (reqpage - i))
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
	}
	++i;

	for (j = reqpage + 1; j < count; ++j) {
		daddr_t jblk;

		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
		if (blk != jblk - (j - reqpage))
			break;
		if ((blk ^ jblk) & dmmax_mask)
			break;
	}
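	/*
	 * At this point [i, j) brackets the largest swap-contiguous run
	 * around reqpage (within the passed array and a single stripe).
	 * For example, if reqpage == 2 and blk == 500, m[1] survives the
	 * backward scan only if its swapblk is 499 (so that
	 * blk == iblk + (reqpage - i)) and 499 sits in the same dmmax
	 * stripe as 500; likewise m[3] must map to 501 to extend j.
	 */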
	/*
	 * free pages outside our collection range.  Note: we never free
	 * mreq, it must remain busy throughout.
	 */

	{
		int k;

		for (k = 0; k < i; ++k)
			vm_page_free(m[k]);
		for (k = j; k < count; ++k)
			vm_page_free(m[k]);
	}
	splx(s);

	/*
	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
	 * still busy, but the others unbusied.
	 */

	if (blk == SWAPBLK_NONE)
		return(VM_PAGER_FAIL);

	/*
	 * Get a swap buffer header to perform the IO
	 */

	bp = getpbuf(&nsw_rcount);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * map our page(s) into kva for input
	 *
	 * NOTE: B_PAGING is set by pbgetvp()
	 */

	pmap_qenter(kva, m + i, j - i);

	bp->b_flags = B_READ | B_CALL;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
	bp->b_data = (caddr_t) kva;
	crhold(bp->b_rcred);
	crhold(bp->b_wcred);
	bp->b_blkno = blk - (reqpage - i);
	bp->b_bcount = PAGE_SIZE * (j - i);
	bp->b_bufsize = PAGE_SIZE * (j - i);
	bp->b_pager.pg_reqpage = reqpage - i;

	{
		int k;

		for (k = i; k < j; ++k) {
			bp->b_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	cnt.v_swapin++;
	cnt.v_swappgsin += bp->b_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */

	vm_object_pip_add(mreq->object, bp->b_npages);
	lastpindex = m[j-1]->pindex;

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swstrategy()
	 */

	BUF_KERNPROC(bp);
	swstrategy(bp);

	/*
	 * wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 */

	s = splvm();

	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		cnt.v_intrans++;
		if (tsleep(mreq, PSWP, "swread", hz*20)) {
			printf(
			    "swap_pager: indefinite wait buffer: device:"
			    " %s, blkno: %ld, size: %ld\n",
			    devtoname(bp->b_dev), (long)bp->b_blkno,
			    (long)bp->b_bcount
			);
		}
	}

	splx(s);

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */

	if (mreq->valid != VM_PAGE_BITS_ALL) {
		return(VM_PAGER_ERROR);
	} else {
		return(VM_PAGER_OK);
	}

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in swstrategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;
	int n = 0;

#if !defined(MAX_PERF)
	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}
#endif
	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */

	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curproc != pageproc)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */

	if (swap_async_max != nsw_wcount_async_max) {
		int n;
		int s;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		s = splvm();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		splx(s);
	}
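	/*
	 * For example, if a sysop sets vm.swap_async_max to 1000 on a
	 * system with nswbuf == 256, the clamp above reduces it to 128;
	 * a setting of 0 is raised to 1.  The difference is then applied
	 * to both nsw_wcount_async and nsw_wcount_async_max so that
	 * buffers already in flight remain accounted for.
	 */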
	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		int s;
		int j;
		struct buf *bp;
		daddr_t blk;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		s = splvm();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			splx(s);
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}
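		/*
		 * Example of the truncation above: with dmmax == 32
		 * (dmmax_mask == ~31), an allocation of n == 20 blocks
		 * starting at blk == 50 would span the stripe boundary
		 * at 64.  We keep j = ((50 + 32) & ~31) - 50 == 14
		 * blocks (50..63), free the 6 blocks at 64..69 back to
		 * the bitmap, and let the next loop iteration handle the
		 * remaining pages.
		 */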
		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 *
		 * NOTE: B_PAGING is set by pbgetvp()
		 */

		if (sync == TRUE) {
			bp = getpbuf(&nsw_wcount_sync);
			bp->b_flags = B_CALL;
		} else {
			bp = getpbuf(&nsw_wcount_async);
			bp->b_flags = B_CALL | B_ASYNC;
		}
		bp->b_spc = NULL;	/* not used, but NULL-out anyway */

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;

		crhold(bp->b_rcred);
		crhold(bp->b_wcred);

		pbgetvp(swapdev_vp, bp);

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_pages[j] = mreq;
		}
		bp->b_npages = n;
		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		cnt.v_swapout++;
		cnt.v_swappgsout += bp->b_npages;
		swapdev_vp->v_numoutput++;

		splx(s);

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swstrategy()
		 */

		if (sync == FALSE) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swstrategy(bp);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swstrategy()
		 */

		bp->b_iodone = swp_pager_sync_iodone;
		swstrategy(bp);

		/*
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		s = splbio();

		while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, PVM, "swwrt", 0);
		}

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */

		swp_pager_async_iodone(bp);

		splx(s);
	}
}

/*
 * swp_pager_sync_iodone:
 *
 *	Completion routine for synchronous reads and writes from/to swap.
 *	We just mark the bp as complete and wake up anyone waiting on it.
 *
 *	This routine may not block.  This routine is called at splbio()
 *	or better.
 */

static void
swp_pager_sync_iodone(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}
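
/*
 * The PG_SWAPINPROG handshake: both getpages and putpages set
 * PG_SWAPINPROG on each page before issuing I/O, and
 * swp_pager_async_iodone() below clears it on completion.  A typical
 * synchronous pagein thus runs:
 *
 *	getpages:	vm_page_flag_set(m, PG_SWAPINPROG); swstrategy(bp);
 *			then tsleep() on mreq until the flag clears
 *	iodone:		vm_page_flag_clear(m, PG_SWAPINPROG);
 *			vm_page_flash(mreq) to wake the sleeper
 */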
/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 *	This routine is called at splbio() or better.
 *
 *	We up ourselves to splvm() as required for various vm_page related
 *	calls.
 */

static void
swp_pager_async_iodone(bp)
	register struct buf *bp;
{
	int s;
	int i;
	vm_object_t object = NULL;

	bp->b_flags |= B_DONE;

	/*
	 * report error
	 */

	if (bp->b_flags & B_ERROR) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld,"
		    "size %ld, error %d\n",
		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */

	if (bp->b_npages)
		object = bp->b_pages[0]->object;
	s = splvm();

	/*
	 * remove the mapping for kernel virtual
	 */

	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */

	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		vm_page_flag_clear(m, PG_SWAPINPROG);

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bp->b_flags & B_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * XXX it may not be legal to free the page
				 * here as this messes with the object->memq's.
				 */

				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);

				if (i != bp->b_pager.pg_reqpage)
					vm_page_free(m);
				else
					vm_page_flash(m);
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bp->b_flags & B_READ) {
			/*
			 * For read success, clear dirty bits.  Nobody should
			 * have this page mapped but don't take any chances,
			 * make sure the pmap modify bits are also cleared.
			 *
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 */
			if (i != bp->b_pager.pg_reqpage) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * For write success, clear the modify and dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
			 */
			vm_page_protect(m, VM_PROT_READ);
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			vm_page_undirty(m);
			vm_page_io_finish(m);
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_npages);

	/*
	 * release the physical I/O buffer
	 */

	relpbuf(
	    bp,
	    ((bp->b_flags & B_READ) ? &nsw_rcount :
		((bp->b_flags & B_ASYNC) ?
		    &nsw_wcount_async :
		    &nsw_wcount_sync
		)
	    )
	);
	splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio()
 *	covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
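
/*
 * Example: each swblock covers a SWAP_META_PAGES-page aligned chunk of
 * the object (the init code notes each swblock holds 16 pages).
 * Looking up page index 37 therefore masks down to the chunk base
 * (37 & ~SWAP_META_MASK == 32 when SWAP_META_PAGES == 16), then chains
 * through bucket (32 ^ (int)(intptr_t)object) & swhash_mask until a
 * swblock with a matching object and base index is found, or NULL.
 */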
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */

	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */

retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			VM_WAIT;
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm().
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
				    SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}
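
/*
 * Example of the skip in swp_pager_meta_free(): when no swblock hashes
 * for the current index, the remainder of that swblock-sized chunk is
 * known to be unassigned, so with SWAP_META_PAGES == 16 and index == 37
 * the loop advances by n == 16 - (37 & 15) == 11 entries straight to
 * index 48 rather than probing each page individually.
 */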
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with
 *			       an object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm().
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
#if !defined(MAX_PERF)
					--swap->swb_count;
#endif
					swp_pager_freeswapspace(v, 1);
				}
			}
#if !defined(MAX_PERF)
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
#endif
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
#if !defined(MAX_PERF)
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
#endif
	}
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
	vm_object_t object,
	vm_pindex_t index,
	int flags
) {
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */

	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}