/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *     The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed by the University of
 *     California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * New Swap System
 * Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *     - The new swapper uses the new radix bitmap code.  This should scale
 *       to arbitrarily small or arbitrarily large swap spaces and an almost
 *       arbitrary degree of fragmentation.
 *
 * Features:
 *
 *     - on the fly reallocation of swap during putpages.  The new system
 *       does not try to keep previously allocated swap blocks for dirty
 *       pages.
 *
 *     - on the fly deallocation of swap
 *
 *     - No more garbage collection required.  Unnecessarily allocated swap
 *       blocks only exist for dirty vm_page_t's now and these are already
 *       cycled (in a high-load system) by the pager.  We also do on-the-fly
 *       removal of invalidated swap blocks when a page is destroyed
 *       or renamed.
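 *
 * A minimal sketch of the bitmap interface as this file uses it (the
 * blist itself is created when swap is configured):
 *
 *     blk = blist_alloc(swapblist, npages);
 *         (returns the starting block, or SWAPBLK_NONE on failure)
 *     blist_free(swapblist, blk, npages);
 *         (returns a contiguous run of blocks to the bitmap)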
 *
 *     from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *     @(#)swap_pager.c    8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES  MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#define SWM_FREE    0x02    /* free, period */
#define SWM_POP     0x04    /* pop out */

int swap_pager_full;        /* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;      /* free read buffers */
static int nsw_wcount_sync; /* limit write buffers / synchronous */
static int nsw_wcount_async; /* limit write buffers / asynchronous */
static int nsw_wcount_async_max; /* assigned maximum */
static int nsw_cluster_max; /* maximum VOP I/O allowed */

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4; /* maximum in-progress async I/O's */
static struct sx sw_alloc_sx;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
    CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS   8

#define NOBJLIST(handle) \
    (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
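/*
 * Bucket arithmetic example for NOBJLIST(): a handle value of 0x1230
 * selects list (0x1230 >> 4) & (NOBJLISTS-1) == 0x123 & 7 == 3.  The low
 * four bits are shifted away because handles are pointer values whose
 * low bits carry little hash information.
 */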
static struct mtx sw_alloc_mtx; /* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
struct pagerlst swap_pager_un_object_list;
uma_zone_t swap_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
    swap_pager_alloc(void *handle, vm_ooffset_t size,
        vm_prot_t prot, vm_ooffset_t offset);
static void swap_pager_dealloc(vm_object_t object);
static int  swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void swap_pager_init(void);
static void swap_pager_unswapped(vm_page_t);
static void swap_pager_strategy(vm_object_t, struct bio *);

struct pagerops swappagerops = {
    swap_pager_init,     /* early system initialization of pager */
    swap_pager_alloc,    /* allocate an OBJT_SWAP object         */
    swap_pager_dealloc,  /* deallocate an OBJT_SWAP object       */
    swap_pager_getpages, /* pagein                               */
    swap_pager_putpages, /* pageout                              */
    swap_pager_haspage,  /* get backing store status for page    */
    swap_pager_unswapped,/* remove swap related to page          */
    swap_pager_strategy  /* pager strategy call                  */
};

static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
static void flushchainbuf(struct buf *nbp);
static void waitchainbuf(struct bio *bp, int count, int done);

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
int dmmax, dmmax_mask;
int nswap_lowat = 128;  /* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;  /* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
    CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static __inline void swp_sizecheck(void);
static void swp_pager_sync_iodone(struct buf *bp);
static void swp_pager_async_iodone(struct buf *bp);

/*
 * Swap bitmap functions
 */
static __inline void    swp_pager_freeswapspace(daddr_t blk, int npages);
static __inline daddr_t swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static __inline struct swblock **
    swp_pager_hash(vm_object_t object, vm_pindex_t index);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() - update swap_pager_full indication
 *
 *     update the swap_pager_almost_full indication and warn when we are
 *     about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *     Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *     No restrictions on call
 *     This routine may not block.
 *     This routine must be called at splvm()
 */
static __inline void
swp_sizecheck(void)
{
    GIANT_REQUIRED;

    if (vm_swap_size < nswap_lowat) {
        if (swap_pager_almost_full == 0) {
            printf("swap_pager: out of swap space\n");
            swap_pager_almost_full = 1;
        }
    } else {
        swap_pager_full = 0;
        if (vm_swap_size > nswap_hiwat)
            swap_pager_almost_full = 0;
    }
}
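/*
 * Worked example of the hysteresis above, using the default thresholds
 * (nswap_lowat = 128 pages, nswap_hiwat = 512 pages): the warning fires
 * once when vm_swap_size first drops below 128, and the almost-full
 * indication is not cleared until vm_swap_size climbs back above 512,
 * so a workload oscillating near the low-water mark does not flap the
 * indication or spam the console.
 */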
/*
 * SWAP_PAGER_INIT() - initialize the swap pager!
 *
 *     Expected to be started from system init.  NOTE:  This code is run
 *     before much else so be careful what you depend on.  Most of the VM
 *     system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
    /*
     * Initialize object lists
     */
    int i;

    for (i = 0; i < NOBJLISTS; ++i)
        TAILQ_INIT(&swap_pager_object_list[i]);
    TAILQ_INIT(&swap_pager_un_object_list);
    mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);

    /*
     * Device Stripe, in PAGE_SIZE'd blocks
     */
    dmmax = SWB_NPAGES * 2;
    dmmax_mask = ~(dmmax - 1);
}
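/*
 * Stripe arithmetic example: with the default MAX_PAGEOUT_CLUSTER of 16,
 * dmmax = 32 pages and dmmax_mask = ~31, so two swap block numbers lie
 * in the same device stripe exactly when they agree in all bits above
 * the low five, i.e. ((a ^ b) & dmmax_mask) == 0.
 */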
/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *     Expected to be started from pageout process once, prior to entering
 *     its main loop.
 */
void
swap_pager_swap_init(void)
{
    int n, n2;

    /*
     * Number of in-transit swap bp operations.  Don't
     * exhaust the pbufs completely.  Make sure we
     * initialize workable values (0 will work for hysteresis
     * but it isn't very efficient).
     *
     * The nsw_cluster_max is constrained by the bp->b_pages[]
     * array (MAXPHYS/PAGE_SIZE) and our locally defined
     * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
     * constrained by the swap device interleave stripe size.
     *
     * Currently we hardwire nsw_wcount_async to 4.  This limit is
     * designed to prevent other I/O from having high latencies due to
     * our pageout I/O.  The value 4 works well for one or two active swap
     * devices but is probably a little low if you have more.  Even so,
     * a higher value would probably generate only a limited improvement
     * with three or four active swap devices since the system does not
     * typically have to pageout at extreme bandwidths.  We will want
     * at least 2 per swap device, and 4 is a pretty good value if you
     * have one NFS swap device due to the command/ack latency over NFS.
     * So it all works out pretty well.
     */
    nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

    mtx_lock(&pbuf_mtx);
    nsw_rcount = (nswbuf + 1) / 2;
    nsw_wcount_sync = (nswbuf + 3) / 4;
    nsw_wcount_async = 4;
    nsw_wcount_async_max = nsw_wcount_async;
    mtx_unlock(&pbuf_mtx);

    /*
     * Initialize our zone.  Right now I'm just guessing on the number
     * we need based on the number of pages in the system.  Each swblock
     * can hold 16 pages, so this is probably overkill.  This reservation
     * is typically limited to around 32MB by default.
     */
    n = cnt.v_page_count / 2;
    if (maxswzone && n > maxswzone / sizeof(struct swblock))
        n = maxswzone / sizeof(struct swblock);
    n2 = n;
    swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
        NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
    do {
        if (uma_zone_set_obj(swap_zone, NULL, n))
            break;
        /*
         * if the allocation failed, try a zone two thirds the
         * size of the previous attempt.
         */
        n -= ((n + 2) / 3);
    } while (n > 0);
    if (swap_zone == NULL)
        panic("failed to create swap_zone.");
    if (n2 != n)
        printf("Swap zone entries reduced from %d to %d.\n", n2, n);
    n2 = n;

    /*
     * Initialize our meta-data hash table.  The swapper does not need to
     * be quite as efficient as the VM system, so we do not use an
     * oversized hash table.
     *
     *     n:           size of hash table, must be power of 2
     *     swhash_mask: hash table index mask
     */
    for (n = 1; n < n2 / 8; n *= 2)
        ;
    swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
    swhash_mask = n - 1;
}
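/*
 * Sizing example for the hash table above: with n2 = 32768 swblock
 * entries the loop selects the smallest power of two >= n2 / 8, giving
 * n = 4096 buckets and swhash_mask = 0xfff.  Each swblock covers
 * SWAP_META_PAGES pages, so the table stays small relative to the
 * metadata it indexes.
 */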
/*
 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
 *                      its metadata structures.
 *
 *     This routine is called from the mmap and fork code to create a new
 *     OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *     and then converting it with swp_pager_meta_build().
 *
 *     This routine may block in vm_object_allocate() and create a named
 *     object lookup race, so we must interlock.  We must also run at
 *     splvm() for the object lookup to handle races with interrupts, but
 *     we do not have to maintain splvm() in between the lookup and the
 *     add because (I believe) it is not possible to attempt to create
 *     a new swap object w/handle when a default object with that handle
 *     already exists.
 *
 *     MPSAFE
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
    vm_object_t object;

    mtx_lock(&Giant);
    if (handle) {
        /*
         * Reference existing named region or allocate new one.  There
         * should not be a race here against swp_pager_meta_build()
         * as called from vm_page_remove() in regards to the lookup
         * of the handle.
         */
        sx_xlock(&sw_alloc_sx);
        object = vm_pager_object_lookup(NOBJLIST(handle), handle);

        if (object != NULL) {
            vm_object_reference(object);
        } else {
            object = vm_object_allocate(OBJT_DEFAULT,
                OFF_TO_IDX(offset + PAGE_MASK + size));
            object->handle = handle;

            swp_pager_meta_build(object, 0, SWAPBLK_NONE);
        }
        sx_xunlock(&sw_alloc_sx);
    } else {
        object = vm_object_allocate(OBJT_DEFAULT,
            OFF_TO_IDX(offset + PAGE_MASK + size));

        swp_pager_meta_build(object, 0, SWAPBLK_NONE);
    }
    mtx_unlock(&Giant);
    return (object);
}

/*
 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
 *
 *     The swap backing for the object is destroyed.  The code is
 *     designed such that we can reinstantiate it later, but this
 *     routine is typically called only when the entire object is
 *     about to be destroyed.
 *
 *     This routine used to block, but no longer does.
 *
 *     The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
    int s;

    GIANT_REQUIRED;

    /*
     * Remove from list right away so lookups will fail if we block for
     * pageout completion.
     */
    mtx_lock(&sw_alloc_mtx);
    if (object->handle == NULL) {
        TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
    } else {
        TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
    }
    mtx_unlock(&sw_alloc_mtx);

    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    vm_object_pip_wait(object, "swpdea");

    /*
     * Free all remaining metadata.  We only bother to free it from
     * the swap meta data.  We do not attempt to free swapblk's still
     * associated with vm_page_t's for this object.  We do not care
     * if paging is still in progress on some objects.
     */
    s = splvm();
    swp_pager_meta_free_all(object);
    splx(s);
}

/************************************************************************
 *                     SWAP PAGER BITMAP ROUTINES                       *
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
 *
 *     Allocate swap for the requested number of pages.  The starting
 *     swap block number (a page index) is returned or SWAPBLK_NONE
 *     if the allocation failed.
 *
 *     Also has the side effect of advising that somebody made a mistake
 *     when they configured swap and didn't configure enough.
 *
 *     Must be called at splvm() to avoid races with bitmap frees from
 *     vm_page_remove() aka swap_pager_page_removed().
 *
 *     This routine may not block.
 *     This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(int npages)
{
    daddr_t blk;

    GIANT_REQUIRED;

    if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
        if (swap_pager_full != 2) {
            printf("swap_pager_getswapspace: failed\n");
            swap_pager_full = 2;
            swap_pager_almost_full = 1;
        }
    } else {
        vm_swap_size -= npages;
        /* per-swap area stats */
        swdevt[BLK2DEVIDX(blk)].sw_used += npages;
        swp_sizecheck();
    }
    return (blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
 *
 *     This routine returns the specified swap blocks back to the bitmap.
 *
 *     Note:  This routine may not block (it could in the old swap code),
 *     and through the use of the new blist routines it does not block.
 *
 *     We must be called at splvm() to avoid races with bitmap frees from
 *     vm_page_remove() aka swap_pager_page_removed().
 *
 *     This routine may not block.
 *     This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
    struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

    GIANT_REQUIRED;

    /* per-swap area stats */
    sp->sw_used -= npages;

    /*
     * If we are attempting to stop swapping on this device, we
     * don't want to mark any blocks free lest they be reused.
     */
    if (sp->sw_flags & SW_CLOSING)
        return;

    blist_free(swapblist, blk, npages);
    vm_swap_size += npages;
    swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
 *                          range within an object.
 *
 *     This is a globally accessible routine.
 *
 *     This routine removes swapblk assignments from swap metadata.
 *
 *     The external callers of this routine typically have already destroyed
 *     or renamed vm_page_t's associated with this range in the object so
 *     we should be ok.
 *
 *     This routine may be called at any spl.  We up our spl to splvm
 *     temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
    int s = splvm();

    GIANT_REQUIRED;
    swp_pager_meta_free(object, start, size);
    splx(s);
}
/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *     Assigns swap blocks to the specified range within the object.  The
 *     swap blocks are not zeroed.  Any previous swap assignment is
 *     destroyed.
 *
 *     Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
    int s;
    int n = 0;
    daddr_t blk = SWAPBLK_NONE;
    vm_pindex_t beg = start;    /* save start index */

    s = splvm();
    while (size) {
        if (n == 0) {
            n = BLIST_MAX_ALLOC;
            while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
                n >>= 1;
                if (n == 0) {
                    swp_pager_meta_free(object, beg, start - beg);
                    splx(s);
                    return (-1);
                }
            }
        }
        swp_pager_meta_build(object, start, blk);
        --size;
        ++start;
        ++blk;
        --n;
    }
    swp_pager_meta_free(object, start, n);
    splx(s);
    return (0);
}
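/*
 * Allocation backoff sketch for the loop above: a request first tries to
 * grab BLIST_MAX_ALLOC pages at once; each time the bitmap cannot supply
 * a run that large, n is halved (n >>= 1).  If n reaches 0 the space
 * assigned since 'beg' is unwound with swp_pager_meta_free() and -1 is
 * returned; otherwise the run is doled out one block per page index
 * until exhausted and a new run is allocated.
 */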
/*
 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
 *                     and destroy the source.
 *
 *     Copy any valid swapblks from the source to the destination.  In
 *     cases where both the source and destination have a valid swapblk,
 *     we keep the destination's.
 *
 *     This routine is allowed to block.  It may block allocating metadata
 *     indirectly through swp_pager_meta_build() or if paging is still in
 *     progress on the source.
 *
 *     This routine can be called at any spl.
 *
 *     XXX vm_page_collapse() kinda expects us not to block because we
 *     supposedly do not need to allocate memory, but for the moment we
 *     *may* have to get a little memory from the zone allocator, but
 *     it is taken from the interrupt memory.  We should be ok.
 *
 *     The source object contains no vm_page_t's (which is just as well).
 *
 *     The source object is of type OBJT_SWAP.
 *
 *     The source and destination objects must be locked or
 *     inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
    vm_pindex_t i;
    int s;

    GIANT_REQUIRED;

    s = splvm();
    /*
     * If destroysource is set, we remove the source object from the
     * swap_pager internal queue now.
     */
    if (destroysource) {
        mtx_lock(&sw_alloc_mtx);
        if (srcobject->handle == NULL) {
            TAILQ_REMOVE(
                &swap_pager_un_object_list,
                srcobject,
                pager_object_list
            );
        } else {
            TAILQ_REMOVE(
                NOBJLIST(srcobject->handle),
                srcobject,
                pager_object_list
            );
        }
        mtx_unlock(&sw_alloc_mtx);
    }

    /*
     * transfer source to destination.
     */
    for (i = 0; i < dstobject->size; ++i) {
        daddr_t dstaddr;

        /*
         * Locate (without changing) the swapblk on the destination,
         * unless it is invalid in which case free it silently, or
         * if the destination is a resident page, in which case the
         * source is thrown away.
         */
        dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

        if (dstaddr == SWAPBLK_NONE) {
            /*
             * Destination has no swapblk and is not resident,
             * copy source.
             */
            daddr_t srcaddr;

            srcaddr = swp_pager_meta_ctl(
                srcobject,
                i + offset,
                SWM_POP
            );

            if (srcaddr != SWAPBLK_NONE)
                swp_pager_meta_build(dstobject, i, srcaddr);
        } else {
            /*
             * Destination has valid swapblk or it is represented
             * by a resident page.  We destroy the source block.
             */
            swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
        }
    }

    /*
     * Free left over swap blocks in source.
     *
     * We have to revert the type to OBJT_DEFAULT so we do not
     * accidentally double-remove the object from the swap queues.
     */
    if (destroysource) {
        swp_pager_meta_free_all(srcobject);
        /*
         * Reverting the type is not necessary, the caller is going
         * to destroy srcobject directly, but I'm doing it here
         * for consistency since we've removed the object from its
         * queues.
         */
        srcobject->type = OBJT_DEFAULT;
    }
    splx(s);
}

/*
 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
 *                        the requested page.
 *
 *     We determine whether good backing store exists for the requested
 *     page and return TRUE if it does, FALSE if it doesn't.
 *
 *     If TRUE, we also try to determine how much valid, contiguous backing
 *     store exists before and after the requested page within a reasonable
 *     distance.  We do not try to restrict it to the swap device stripe
 *     (that is handled in getpages/putpages).  It probably isn't worth
 *     doing here.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
    daddr_t blk0;
    int s;

    /*
     * do we have good backing store at the requested index ?
     */
    s = splvm();
    blk0 = swp_pager_meta_ctl(object, pindex, 0);

    if (blk0 == SWAPBLK_NONE) {
        splx(s);
        if (before)
            *before = 0;
        if (after)
            *after = 0;
        return (FALSE);
    }

    /*
     * find backwards-looking contiguous good backing store
     */
    if (before != NULL) {
        int i;

        for (i = 1; i < (SWB_NPAGES/2); ++i) {
            daddr_t blk;

            if (i > pindex)
                break;
            blk = swp_pager_meta_ctl(object, pindex - i, 0);
            if (blk != blk0 - i)
                break;
        }
        *before = (i - 1);
    }

    /*
     * find forward-looking contiguous good backing store
     */
    if (after != NULL) {
        int i;

        for (i = 1; i < (SWB_NPAGES/2); ++i) {
            daddr_t blk;

            blk = swp_pager_meta_ctl(object, pindex + i, 0);
            if (blk != blk0 + i)
                break;
        }
        *after = (i - 1);
    }
    splx(s);
    return (TRUE);
}
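/*
 * Example for the scans above: if pindex is backed by swap block 100,
 * pindex-1 and pindex-2 by blocks 99 and 98, and pindex+1 by block 101
 * with block 102 missing, haspage returns TRUE with *before = 2 and
 * *after = 1.  Both counts are capped at SWB_NPAGES/2 - 1.
 */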
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *     This removes any associated swap backing store, whether valid or
 *     not, from the page.
 *
 *     This routine is typically called when a page is made dirty, at
 *     which point any associated swap can be freed.  MADV_FREE also
 *     calls us in a special-case situation.
 *
 *     NOTE!!!  If the page is clean and the swap was valid, the caller
 *     should make the page dirty before calling this routine.  This routine
 *     does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *     depends on it.
 *
 *     This routine may not block.
 *     This routine must be called at splvm().
 */
static void
swap_pager_unswapped(vm_page_t m)
{
    swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *     This implements the vm_pager_strategy() interface to swap and allows
 *     other parts of the system to directly access swap as backing store
 *     through vm_objects of type OBJT_SWAP.  This is intended to be a
 *     cacheless interface ( i.e. caching occurs at higher levels ).
 *     Therefore we do not maintain any resident pages.  All I/O goes
 *     directly to and from the swap device.
 *
 *     Note that b_blkno is scaled for PAGE_SIZE.
 *
 *     We currently attempt to run I/O synchronously or asynchronously as
 *     the caller requests.  This isn't perfect because we lose error
 *     sequencing when we run multiple ops in parallel to satisfy a request.
 *     But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bp)
{
    vm_pindex_t start;
    int count;
    int s;
    char *data;
    struct buf *nbp = NULL;

    GIANT_REQUIRED;

    /* XXX: KASSERT instead ? */
    if (bp->bio_bcount & PAGE_MASK) {
        biofinish(bp, NULL, EINVAL);
        printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n",
            bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
        return;
    }

    /*
     * Clear error indication, initialize page index, count, data pointer.
     */
    bp->bio_error = 0;
    bp->bio_flags &= ~BIO_ERROR;
    bp->bio_resid = bp->bio_bcount;
    *(u_int *) &bp->bio_driver1 = 0;

    start = bp->bio_pblkno;
    count = howmany(bp->bio_bcount, PAGE_SIZE);
    data = bp->bio_data;

    s = splvm();

    /*
     * Deal with BIO_DELETE
     */
    if (bp->bio_cmd == BIO_DELETE) {
        /*
         * FREE PAGE(s) - destroy underlying swap that is no longer
         * needed.
         */
        swp_pager_meta_free(object, start, count);
        splx(s);
        bp->bio_resid = 0;
        biodone(bp);
        return;
    }

    /*
     * Execute read or write
     */
    while (count > 0) {
        daddr_t blk;

        /*
         * Obtain block.  If block not found and writing, allocate a
         * new block and build it into the object.
         */
        blk = swp_pager_meta_ctl(object, start, 0);
        if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
            blk = swp_pager_getswapspace(1);
            if (blk == SWAPBLK_NONE) {
                bp->bio_error = ENOMEM;
                bp->bio_flags |= BIO_ERROR;
                break;
            }
            swp_pager_meta_build(object, start, blk);
        }

        /*
         * Do we have to flush our current collection?  Yes if:
         *
         *     - no swap block at this index
         *     - swap block is not contiguous
         *     - we cross a physical disk boundary in the
         *       stripe.
         */
        if (
            nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
             ((nbp->b_blkno ^ blk) & dmmax_mask)
            )
        ) {
            splx(s);
            if (bp->bio_cmd == BIO_READ) {
                ++cnt.v_swapin;
                cnt.v_swappgsin += btoc(nbp->b_bcount);
            } else {
                ++cnt.v_swapout;
                cnt.v_swappgsout += btoc(nbp->b_bcount);
                nbp->b_dirtyend = nbp->b_bcount;
            }
            flushchainbuf(nbp);
            s = splvm();
            nbp = NULL;
        }

        /*
         * Add new swapblk to nbp, instantiating nbp if necessary.
         * Zero-fill reads are able to take a shortcut.
         */
        if (blk == SWAPBLK_NONE) {
            /*
             * We can only get here if we are reading.  Since
             * we are at splvm() we can safely modify b_resid,
             * even if chain ops are in progress.
             */
            bzero(data, PAGE_SIZE);
            bp->bio_resid -= PAGE_SIZE;
        } else {
            if (nbp == NULL) {
                nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
                nbp->b_blkno = blk;
                nbp->b_bcount = 0;
                nbp->b_data = data;
            }
            nbp->b_bcount += PAGE_SIZE;
        }
        --count;
        ++start;
        data += PAGE_SIZE;
    }

    /*
     * Flush out last buffer
     */
    splx(s);

    if (nbp) {
        if (nbp->b_iocmd == BIO_READ) {
            ++cnt.v_swapin;
            cnt.v_swappgsin += btoc(nbp->b_bcount);
        } else {
            ++cnt.v_swapout;
            cnt.v_swappgsout += btoc(nbp->b_bcount);
            nbp->b_dirtyend = nbp->b_bcount;
        }
        flushchainbuf(nbp);
        /* nbp = NULL; */
    }

    /*
     * Wait for completion.
     */
    waitchainbuf(bp, 0, 1);
}
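/*
 * Worked example of the stripe test used above: with dmmax = 32, a
 * cluster starting at block 31 may not absorb block 32 because
 * (31 ^ 32) & ~31 == 0x20 != 0; the two blocks sit in different device
 * stripes, so the pending chain buffer is flushed first.
 */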
/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *     Attempt to retrieve (m, count) pages from backing store, but make
 *     sure we retrieve at least m[reqpage].  We try to load in as large
 *     a chunk surrounding m[reqpage] as is contiguous in swap and which
 *     belongs to the same object.
 *
 *     The code is designed for asynchronous operation and
 *     immediate-notification of 'reqpage' but tends not to be
 *     used that way.  Please do not optimize-out this algorithmic
 *     feature, I intend to improve on it in the future.
 *
 *     The parent has a single vm_object_pip_add() reference prior to
 *     calling us and we should return with the same.
 *
 *     The parent has BUSY'd the pages.  We should return with 'm'
 *     left busy, but the others adjusted.
 */
static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
    struct buf *bp;
    vm_page_t mreq;
    int s;
    int i;
    int j;
    daddr_t blk;
    vm_pindex_t lastpindex;

    GIANT_REQUIRED;

    mreq = m[reqpage];

    if (mreq->object != object) {
        panic("swap_pager_getpages: object mismatch %p/%p",
            object,
            mreq->object
        );
    }
    /*
     * Calculate range to retrieve.  The pages have already been assigned
     * their swapblks.  We require a *contiguous* range that falls entirely
     * within a single device stripe.  If we do not supply it, bad things
     * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
     * loops are set up such that the case(s) are handled implicitly.
     *
     * The swp_*() calls must be made at splvm().  vm_page_free() does
     * not need to be, but it will go a little faster if it is.
     */
    s = splvm();
    blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

    for (i = reqpage - 1; i >= 0; --i) {
        daddr_t iblk;

        iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
        if (blk != iblk + (reqpage - i))
            break;
        if ((blk ^ iblk) & dmmax_mask)
            break;
    }
    ++i;

    for (j = reqpage + 1; j < count; ++j) {
        daddr_t jblk;

        jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
        if (blk != jblk - (j - reqpage))
            break;
        if ((blk ^ jblk) & dmmax_mask)
            break;
    }

    /*
     * free pages outside our collection range.  Note: we never free
     * mreq, it must remain busy throughout.
     */
    vm_page_lock_queues();
    {
        int k;

        for (k = 0; k < i; ++k)
            vm_page_free(m[k]);
        for (k = j; k < count; ++k)
            vm_page_free(m[k]);
    }
    vm_page_unlock_queues();
    splx(s);

    /*
     * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
     * still busy, but the others unbusied.
     */
    if (blk == SWAPBLK_NONE)
        return (VM_PAGER_FAIL);

    /*
     * Get a swap buffer header to perform the IO
     */
    bp = getpbuf(&nsw_rcount);

    /*
     * map our page(s) into kva for input
     *
     * NOTE: B_PAGING is set by pbgetvp()
     */
    pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i);

    bp->b_iocmd = BIO_READ;
    bp->b_iodone = swp_pager_async_iodone;
    bp->b_rcred = crhold(thread0.td_ucred);
    bp->b_wcred = crhold(thread0.td_ucred);
    bp->b_blkno = blk - (reqpage - i);
    bp->b_bcount = PAGE_SIZE * (j - i);
    bp->b_bufsize = PAGE_SIZE * (j - i);
    bp->b_pager.pg_reqpage = reqpage - i;

    vm_page_lock_queues();
    {
        int k;

        for (k = i; k < j; ++k) {
            bp->b_pages[k - i] = m[k];
            vm_page_flag_set(m[k], PG_SWAPINPROG);
        }
    }
    vm_page_unlock_queues();
    bp->b_npages = j - i;

    pbgetvp(swapdev_vp, bp);

    cnt.v_swapin++;
    cnt.v_swappgsin += bp->b_npages;

    /*
     * We still hold the lock on mreq, and our automatic completion routine
     * does not remove it.
     */
    VM_OBJECT_LOCK(mreq->object);
    vm_object_pip_add(mreq->object, bp->b_npages);
    VM_OBJECT_UNLOCK(mreq->object);
    lastpindex = m[j-1]->pindex;

    /*
     * perform the I/O.  NOTE!!!  bp cannot be considered valid after
     * this point because we automatically release it on completion.
     * Instead, we look at the one page we are interested in which we
     * still hold a lock on even through the I/O completion.
     *
     * The other pages in our m[] array are also released on completion,
     * so we cannot assume they are valid anymore either.
     *
     * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
     */
    BUF_KERNPROC(bp);
    VOP_STRATEGY(bp->b_vp, bp);

    /*
     * wait for the page we want to complete.  PG_SWAPINPROG is always
     * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
     * is set in the meta-data.
     */
    s = splvm();
    vm_page_lock_queues();
    while ((mreq->flags & PG_SWAPINPROG) != 0) {
        vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
        cnt.v_intrans++;
        if (msleep(mreq, &vm_page_queue_mtx, PSWP, "swread", hz*20)) {
            printf(
                "swap_pager: indefinite wait buffer: device:"
                " %s, blkno: %ld, size: %ld\n",
                devtoname(bp->b_dev), (long)bp->b_blkno,
                bp->b_bcount
            );
        }
    }
    vm_page_unlock_queues();
    splx(s);

    /*
     * mreq is left busied after completion, but all the other pages
     * are freed.  If we had an unrecoverable read error the page will
     * not be valid.
     */
    if (mreq->valid != VM_PAGE_BITS_ALL) {
        return (VM_PAGER_ERROR);
    } else {
        return (VM_PAGER_OK);
    }

    /*
     * A final note: in a low swap situation, we cannot deallocate swap
     * and mark a page dirty here because the caller is likely to mark
     * the page clean when we return, causing the page to possibly revert
     * to all-zero's later.
     */
}
/*
 * swap_pager_putpages:
 *
 *     Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *     We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *     are automatically converted to SWAP objects.
 *
 *     In a low memory situation we may block in VOP_STRATEGY(), but the new
 *     vm_page reservation system coupled with properly written VFS devices
 *     should ensure that no low-memory deadlock occurs.  This is an area
 *     which needs work.
 *
 *     The parent has N vm_object_pip_add() references prior to
 *     calling us and will remove references for rtvals[] that are
 *     not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *     completion.
 *
 *     The parent has soft-busy'd the pages it passes us and will unbusy
 *     those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *     We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
    int i;
    int n = 0;

    GIANT_REQUIRED;
    if (count && m[0]->object != object) {
        panic("swap_pager_putpages: object mismatch %p/%p",
            object,
            m[0]->object
        );
    }
    /*
     * Step 1
     *
     * Turn object into OBJT_SWAP
     * check for bogus sysops
     * force sync if not pageout process
     */
    if (object->type != OBJT_SWAP)
        swp_pager_meta_build(object, 0, SWAPBLK_NONE);

    if (curproc != pageproc)
        sync = TRUE;

    /*
     * Step 2
     *
     * Update nsw parameters from swap_async_max sysctl values.
     * Do not let the sysop crash the machine with bogus numbers.
     */
    mtx_lock(&pbuf_mtx);
    if (swap_async_max != nsw_wcount_async_max) {
        int n;
        int s;

        /*
         * limit range
         */
        if ((n = swap_async_max) > nswbuf / 2)
            n = nswbuf / 2;
        if (n < 1)
            n = 1;
        swap_async_max = n;

        /*
         * Adjust difference ( if possible ).  If the current async
         * count is too low, we may not be able to make the adjustment
         * at this time.
         */
        s = splvm();
        n -= nsw_wcount_async_max;
        if (nsw_wcount_async + n >= 0) {
            nsw_wcount_async += n;
            nsw_wcount_async_max += n;
            wakeup(&nsw_wcount_async);
        }
        splx(s);
    }
    mtx_unlock(&pbuf_mtx);
This is an area 1189 * which needs work. 1190 * 1191 * The parent has N vm_object_pip_add() references prior to 1192 * calling us and will remove references for rtvals[] that are 1193 * not set to VM_PAGER_PEND. We need to remove the rest on I/O 1194 * completion. 1195 * 1196 * The parent has soft-busy'd the pages it passes us and will unbusy 1197 * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 1198 * We need to unbusy the rest on I/O completion. 1199 */ 1200 void 1201 swap_pager_putpages(object, m, count, sync, rtvals) 1202 vm_object_t object; 1203 vm_page_t *m; 1204 int count; 1205 boolean_t sync; 1206 int *rtvals; 1207 { 1208 int i; 1209 int n = 0; 1210 1211 GIANT_REQUIRED; 1212 if (count && m[0]->object != object) { 1213 panic("swap_pager_getpages: object mismatch %p/%p", 1214 object, 1215 m[0]->object 1216 ); 1217 } 1218 /* 1219 * Step 1 1220 * 1221 * Turn object into OBJT_SWAP 1222 * check for bogus sysops 1223 * force sync if not pageout process 1224 */ 1225 if (object->type != OBJT_SWAP) 1226 swp_pager_meta_build(object, 0, SWAPBLK_NONE); 1227 1228 if (curproc != pageproc) 1229 sync = TRUE; 1230 1231 /* 1232 * Step 2 1233 * 1234 * Update nsw parameters from swap_async_max sysctl values. 1235 * Do not let the sysop crash the machine with bogus numbers. 1236 */ 1237 mtx_lock(&pbuf_mtx); 1238 if (swap_async_max != nsw_wcount_async_max) { 1239 int n; 1240 int s; 1241 1242 /* 1243 * limit range 1244 */ 1245 if ((n = swap_async_max) > nswbuf / 2) 1246 n = nswbuf / 2; 1247 if (n < 1) 1248 n = 1; 1249 swap_async_max = n; 1250 1251 /* 1252 * Adjust difference ( if possible ). If the current async 1253 * count is too low, we may not be able to make the adjustment 1254 * at this time. 1255 */ 1256 s = splvm(); 1257 n -= nsw_wcount_async_max; 1258 if (nsw_wcount_async + n >= 0) { 1259 nsw_wcount_async += n; 1260 nsw_wcount_async_max += n; 1261 wakeup(&nsw_wcount_async); 1262 } 1263 splx(s); 1264 } 1265 mtx_unlock(&pbuf_mtx); 1266 1267 /* 1268 * Step 3 1269 * 1270 * Assign swap blocks and issue I/O. We reallocate swap on the fly. 1271 * The page is left dirty until the pageout operation completes 1272 * successfully. 1273 */ 1274 for (i = 0; i < count; i += n) { 1275 int s; 1276 int j; 1277 struct buf *bp; 1278 daddr_t blk; 1279 1280 /* 1281 * Maximum I/O size is limited by a number of factors. 1282 */ 1283 n = min(BLIST_MAX_ALLOC, count - i); 1284 n = min(n, nsw_cluster_max); 1285 1286 s = splvm(); 1287 1288 /* 1289 * Get biggest block of swap we can. If we fail, fall 1290 * back and try to allocate a smaller block. Don't go 1291 * overboard trying to allocate space if it would overly 1292 * fragment swap. 1293 */ 1294 while ( 1295 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE && 1296 n > 4 1297 ) { 1298 n >>= 1; 1299 } 1300 if (blk == SWAPBLK_NONE) { 1301 for (j = 0; j < n; ++j) 1302 rtvals[i+j] = VM_PAGER_FAIL; 1303 splx(s); 1304 continue; 1305 } 1306 1307 /* 1308 * The I/O we are constructing cannot cross a physical 1309 * disk boundry in the swap stripe. Note: we are still 1310 * at splvm(). 1311 */ 1312 if ((blk ^ (blk + n)) & dmmax_mask) { 1313 j = ((blk + dmmax) & dmmax_mask) - blk; 1314 swp_pager_freeswapspace(blk + j, n - j); 1315 n = j; 1316 } 1317 1318 /* 1319 * All I/O parameters have been satisfied, build the I/O 1320 * request and assign the swap space. 
/*
 * swp_pager_sync_iodone:
 *
 *     Completion routine for synchronous reads and writes from/to swap.
 *     We just mark the bp as complete and wake up anyone waiting on it.
 *
 *     This routine may not block.  This routine is called at splbio()
 *     or better.
 */
static void
swp_pager_sync_iodone(struct buf *bp)
{
    bp->b_flags |= B_DONE;
    bp->b_flags &= ~B_ASYNC;
    wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *     Completion routine for asynchronous reads and writes from/to swap.
 *     Also called manually by synchronous code to finish up a bp.
 *
 *     For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *     the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *     unbusy all pages except the 'main' request page.  For WRITE
 *     operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *     because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *     This routine may not block.
 *     This routine is called at splbio() or better.
 *
 *     We up ourselves to splvm() as required for various vm_page related
 *     calls.
 */
static void
swp_pager_async_iodone(struct buf *bp)
{
    int s;
    int i;
    vm_object_t object = NULL;

    GIANT_REQUIRED;
    bp->b_flags |= B_DONE;

    /*
     * report error
     */
    if (bp->b_ioflags & BIO_ERROR) {
        printf(
            "swap_pager: I/O error - %s failed; blkno %ld, "
            "size %ld, error %d\n",
            ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
            (long)bp->b_blkno,
            (long)bp->b_bcount,
            bp->b_error
        );
    }

    /*
     * set object, raise to splvm().
     */
    if (bp->b_npages)
        object = bp->b_pages[0]->object;
    s = splvm();

    /*
     * remove the mapping for kernel virtual
     */
    pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

    vm_page_lock_queues();
    /*
     * cleanup pages.  If an error occurs writing to swap, we are in
     * very serious trouble.  If it happens to be a disk error, though,
     * we may be able to recover by reassigning the swap later on.  So
     * in this case we remove the m->swapblk assignment for the page
     * but do not free it in the rlist.  The erroneous block(s) are thus
     * never reallocated as swap.  Redirty the page and continue.
     */
    for (i = 0; i < bp->b_npages; ++i) {
        vm_page_t m = bp->b_pages[i];

        vm_page_flag_clear(m, PG_SWAPINPROG);

        if (bp->b_ioflags & BIO_ERROR) {
            /*
             * If an error occurs I'd love to throw the swapblk
             * away without freeing it back to swapspace, so it
             * can never be used again.  But I can't from an
             * interrupt.
             */
            if (bp->b_iocmd == BIO_READ) {
                /*
                 * When reading, reqpage needs to stay
                 * locked for the parent, but all other
                 * pages can be freed.  We still want to
                 * wakeup the parent waiting on the page,
                 * though.  ( also: pg_reqpage can be -1 and
                 * not match anything ).
                 *
                 * We have to wake specifically requested pages
                 * up too because we cleared PG_SWAPINPROG and
                 * someone may be waiting for that.
                 *
                 * NOTE: for reads, m->dirty will probably
                 * be overridden by the original caller of
                 * getpages so don't play cute tricks here.
                 *
                 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
                 * AS THIS MESSES WITH object->memq, and it is
                 * not legal to mess with object->memq from an
                 * interrupt.
                 */
                m->valid = 0;
                vm_page_flag_clear(m, PG_ZERO);
                if (i != bp->b_pager.pg_reqpage)
                    vm_page_free(m);
                else
                    vm_page_flash(m);
                /*
                 * If i == bp->b_pager.pg_reqpage, do not wake
                 * the page up.  The caller needs to.
                 */
            } else {
                /*
                 * If a write error occurs, reactivate page
                 * so it doesn't clog the inactive list,
                 * then finish the I/O.
                 */
                vm_page_dirty(m);
                vm_page_activate(m);
                vm_page_io_finish(m);
            }
        } else if (bp->b_iocmd == BIO_READ) {
            /*
             * For read success, clear dirty bits.  Nobody should
             * have this page mapped but don't take any chances,
             * make sure the pmap modify bits are also cleared.
             *
             * NOTE: for reads, m->dirty will probably be
             * overridden by the original caller of getpages so
             * we cannot set them in order to free the underlying
             * swap in a low-swap situation.  I don't think we'd
             * want to do that anyway, but it was an optimization
             * that existed in the old swapper for a time before
             * it got ripped out due to precisely this problem.
             *
             * clear PG_ZERO in page.
             *
             * If not the requested page then deactivate it.
             *
             * Note that the requested page, reqpage, is left
             * busied, but we still have to wake it up.  The
             * other pages are released (unbusied) by
             * vm_page_wakeup().  We do not set reqpage's
             * valid bits here, it is up to the caller.
             */
            pmap_clear_modify(m);
            m->valid = VM_PAGE_BITS_ALL;
            vm_page_undirty(m);
            vm_page_flag_clear(m, PG_ZERO);

            /*
             * We have to wake specifically requested pages
             * up too because we cleared PG_SWAPINPROG and
             * could be waiting for it in getpages.  However,
             * be sure to not unbusy getpages specifically
             * requested page - getpages expects it to be
             * left busy.
             */
            if (i != bp->b_pager.pg_reqpage) {
                vm_page_deactivate(m);
                vm_page_wakeup(m);
            } else {
                vm_page_flash(m);
            }
        } else {
            /*
             * For write success, clear the modify and dirty
             * status, then finish the I/O ( which decrements the
             * busy count and possibly wakes waiters up ).
             */
            pmap_clear_modify(m);
            vm_page_undirty(m);
            vm_page_io_finish(m);
            if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
                pmap_page_protect(m, VM_PROT_READ);
        }
    }
    vm_page_unlock_queues();

    /*
     * adjust pip.  NOTE: the original parent may still have its own
     * pip refs on the object.
     */
    if (object != NULL) {
        VM_OBJECT_LOCK(object);
        vm_object_pip_wakeupn(object, bp->b_npages);
        VM_OBJECT_UNLOCK(object);
    }

    /*
     * release the physical I/O buffer
     */
    relpbuf(
        bp,
        ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
            ((bp->b_flags & B_ASYNC) ?
                &nsw_wcount_async :
                &nsw_wcount_sync
            )
        )
    );
    splx(s);
}
/*
 * swap_pager_isswapped:
 *
 *     Return 1 if at least one page in the given object is paged
 *     out to the given swap device.
 *
 *     This routine may not block.
 */
int
swap_pager_isswapped(vm_object_t object, int devidx)
{
    daddr_t index = 0;
    int bcount;
    int i;

    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
        struct swblock *swap;

        if ((swap = *swp_pager_hash(object, index)) != NULL) {
            for (i = 0; i < SWAP_META_PAGES; ++i) {
                daddr_t v = swap->swb_pages[i];
                if (v != SWAPBLK_NONE &&
                    BLK2DEVIDX(v) == devidx)
                    return 1;
            }
        }

        index += SWAP_META_PAGES;
        if (index > 0x20000000)
            panic("swap_pager_isswapped: failed to locate all swap meta blocks");
    }
    return 0;
}

/*
 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
 *
 *     This routine dissociates the page at the given index within a
 *     swap block from its backing store, paging it in if necessary.
 *     If the page is paged in, it is placed in the inactive queue,
 *     since it had its backing store ripped out from under it.
 *     We also attempt to swap in all other pages in the swap block;
 *     however, we only guarantee that the one at the specified index
 *     is paged in.
 *
 *     XXX - The code to page the whole block in doesn't work, so we
 *           revert to the one-by-one behavior for now.  Sigh.
 */
static __inline void
swp_pager_force_pagein(struct swblock *swap, int idx)
{
    vm_object_t object;
    vm_page_t m;
    vm_pindex_t pindex;

    object = swap->swb_object;
    pindex = swap->swb_index;

    VM_OBJECT_LOCK(object);
    vm_object_pip_add(object, 1);
    VM_OBJECT_UNLOCK(object);
    m = vm_page_grab(object, pindex + idx, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
    if (m->valid == VM_PAGE_BITS_ALL) {
        VM_OBJECT_LOCK(object);
        vm_object_pip_subtract(object, 1);
        VM_OBJECT_UNLOCK(object);
        vm_page_lock_queues();
        vm_page_activate(m);
        vm_page_dirty(m);
        vm_page_wakeup(m);
        vm_page_unlock_queues();
        vm_pager_page_unswapped(m);
        return;
    }

    if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
        panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
    VM_OBJECT_LOCK(object);
    vm_object_pip_subtract(object, 1);
    VM_OBJECT_UNLOCK(object);

    vm_page_lock_queues();
    vm_page_dirty(m);
    vm_page_dontneed(m);
    vm_page_wakeup(m);
    vm_page_unlock_queues();
    vm_pager_page_unswapped(m);
}
/*
 * swap_pager_swapoff:
 *
 *     Page in all of the pages that have been paged out to the
 *     given device.  The corresponding blocks in the bitmap must be
 *     marked as allocated and the device must be flagged SW_CLOSING.
 *     There must be no processes swapped out to the device.
 *
 *     The sw_used parameter points to the field in the swdev structure
 *     that contains a count of the number of blocks still allocated
 *     on the device.  If we encounter objects with a nonzero pip count
 *     in our scan, we use this number to determine if we're really done.
 *
 *     This routine may block.
 */
void
swap_pager_swapoff(int devidx, int *sw_used)
{
    struct swblock **pswap;
    struct swblock *swap;
    vm_object_t waitobj;
    daddr_t v;
    int i, j;

    GIANT_REQUIRED;

full_rescan:
    waitobj = NULL;
    for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
restart:
        pswap = &swhash[i];
        while ((swap = *pswap) != NULL) {
            for (j = 0; j < SWAP_META_PAGES; ++j) {
                v = swap->swb_pages[j];
                if (v != SWAPBLK_NONE &&
                    BLK2DEVIDX(v) == devidx)
                    break;
            }
            if (j < SWAP_META_PAGES) {
                swp_pager_force_pagein(swap, j);
                goto restart;
            } else if (swap->swb_object->paging_in_progress) {
                if (!waitobj)
                    waitobj = swap->swb_object;
            }
            pswap = &swap->swb_hnext;
        }
    }
    if (waitobj && *sw_used) {
        /*
         * We wait on an arbitrary object to clock our rescans
         * to the rate of paging completion.
         */
        VM_OBJECT_LOCK(waitobj);
        vm_object_pip_wait(waitobj, "swpoff");
        VM_OBJECT_UNLOCK(waitobj);
        goto full_rescan;
    }
    if (*sw_used)
        panic("swapoff: failed to locate %d swap blocks", *sw_used);
}

/************************************************************************
 *                          SWAP META DATA                              *
 ************************************************************************
 *
 *     These routines manipulate the swap metadata stored in the
 *     OBJT_SWAP object.  All swp_*() routines must be called at
 *     splvm() because swap can be freed up by the low level vm_page
 *     code which might be called from interrupts beyond what splbio()
 *     covers.
 *
 *     Swap metadata is implemented with a global hash and not directly
 *     linked into the object.  Instead the object simply contains
 *     appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() - hash swap meta data
 *
 *     This is an inline helper function which hashes the swapblk given
 *     the object and page index.  It returns a pointer to a pointer
 *     to the swblock, or a pointer to a NULL pointer if it could not
 *     find a swapblk.
 *
 *     This routine must be called at splvm().
 */
static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
    struct swblock **pswap;
    struct swblock *swap;

    index &= ~(vm_pindex_t)SWAP_META_MASK;
    pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
    while ((swap = *pswap) != NULL) {
        if (swap->swb_object == object &&
            swap->swb_index == index
        ) {
            break;
        }
        pswap = &swap->swb_hnext;
    }
    return (pswap);
}
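/*
 * Example: with SWAP_META_PAGES == 16 (SWAP_META_MASK == 15, per the
 * zone comment above), page indices 0-15 of an object are masked down
 * to swb_index 0 and land in bucket (0 ^ (int)(intptr_t)object) &
 * swhash_mask; index 16 starts the next swblock.  The object pointer
 * is XOR'd in so equal indices from different objects spread across
 * the table.
 */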
/*
 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
 *
 *     We first convert the object to a swap object if it is a default
 *     object.
 *
 *     The specified swapblk is added to the object's swap metadata.  If
 *     the swapblk is not valid, it is freed instead.  Any previously
 *     assigned swapblk is freed.
 *
 *     This routine must be called at splvm(), except when used to convert
 *     an OBJT_DEFAULT object into an OBJT_SWAP object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
{
    struct swblock *swap;
    struct swblock **pswap;
    int idx;

    GIANT_REQUIRED;
    /*
     * Convert default object to swap object if necessary
     */
    if (object->type != OBJT_SWAP) {
        object->type = OBJT_SWAP;
        object->un_pager.swp.swp_bcount = 0;

        mtx_lock(&sw_alloc_mtx);
        if (object->handle != NULL) {
            TAILQ_INSERT_TAIL(
                NOBJLIST(object->handle),
                object,
                pager_object_list
            );
        } else {
            TAILQ_INSERT_TAIL(
                &swap_pager_un_object_list,
                object,
                pager_object_list
            );
        }
        mtx_unlock(&sw_alloc_mtx);
    }

    /*
     * Locate hash entry.  If not found, create one, but if we aren't
     * adding anything just return.  If we run out of space in the map
     * we wait and, since the hash table may have changed, retry.
     */
retry:
    pswap = swp_pager_hash(object, pindex);

    if ((swap = *pswap) == NULL) {
        int i;

        if (swapblk == SWAPBLK_NONE)
            return;

        swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
        if (swap == NULL) {
            VM_WAIT;
            goto retry;
        }

        swap->swb_hnext = NULL;
        swap->swb_object = object;
        swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
        swap->swb_count = 0;

        ++object->un_pager.swp.swp_bcount;

        for (i = 0; i < SWAP_META_PAGES; ++i)
            swap->swb_pages[i] = SWAPBLK_NONE;
    }

    /*
     * Delete prior contents of metadata
     */
    idx = pindex & SWAP_META_MASK;

    if (swap->swb_pages[idx] != SWAPBLK_NONE) {
        swp_pager_freeswapspace(swap->swb_pages[idx], 1);
        --swap->swb_count;
    }

    /*
     * Enter block into metadata
     */
    swap->swb_pages[idx] = swapblk;
    if (swapblk != SWAPBLK_NONE)
        ++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *     The requested range of blocks is freed, with any associated swap
 *     returned to the swap bitmap.
 *
 *     This routine will free swap metadata structures as they are cleaned
 *     out.  This routine does *NOT* operate on swap metadata associated
 *     with resident pages.
 *
 *     This routine must be called at splvm().
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
    GIANT_REQUIRED;

    if (object->type != OBJT_SWAP)
        return;

    while (count > 0) {
        struct swblock **pswap;
        struct swblock *swap;

        pswap = swp_pager_hash(object, index);

        if ((swap = *pswap) != NULL) {
            daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

            if (v != SWAPBLK_NONE) {
                swp_pager_freeswapspace(v, 1);
                swap->swb_pages[index & SWAP_META_MASK] =
                    SWAPBLK_NONE;
                if (--swap->swb_count == 0) {
                    *pswap = swap->swb_hnext;
                    uma_zfree(swap_zone, swap);
                    --object->un_pager.swp.swp_bcount;
                }
            }
            --count;
            ++index;
        } else {
            int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
            count -= n;
            index += n;
        }
    }
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *     This routine locates and destroys all swap metadata associated with
 *     an object.
/*
 * SWP_PAGER_META_FREE_ALL() -	destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm().
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	GIANT_REQUIRED;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swp_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			uma_zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}

/*
 * SWP_PAGER_METACTL() -	misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up or
 *	popped, or SWAPBLK_NONE if the block was freed or was invalid.
 *	This routine will automatically free any invalid meta-data
 *	swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free it; pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
{
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1;
	int idx;

	GIANT_REQUIRED;
	/*
	 * The meta data only exists if the object is OBJT_SWAP,
	 * and even then it might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP)
		return (SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	pswap = swp_pager_hash(object, pindex);

	if ((swap = *pswap) != NULL) {
		idx = pindex & SWAP_META_MASK;
		r1 = swap->swb_pages[idx];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[idx] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					uma_zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return (r1);
}
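/*
 * Usage sketch (not compiled): the three ways swp_pager_meta_ctl() is
 * typically invoked, using the SWM_* flags defined at the top of this
 * file.
 */
#if 0
	daddr_t blk;

	/* Pure lookup: the metadata is left untouched. */
	blk = swp_pager_meta_ctl(object, pindex, 0);

	/* Pop: the assignment is removed from the metadata, but the
	 * swap block stays allocated and is returned to the caller. */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);

	/* Free: the assignment is removed and the block is returned
	 * to the swap bitmap; SWAPBLK_NONE comes back. */
	(void) swp_pager_meta_ctl(object, pindex, SWM_FREE);
#endif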
/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.
 */

/*
 *	vm_pager_chain_iodone:
 *
 *	I/O completion routine for a child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be
 *	lost.
 */
static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct bio *bp;
	u_int *count;

	bp = nbp->b_caller1;
	if (bp != NULL) {
		count = (u_int *)&(bp->bio_driver1);
		if (nbp->b_ioflags & BIO_ERROR) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = nbp->b_error;
		} else if (nbp->b_resid != 0) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = EINVAL;
		} else {
			bp->bio_resid -= nbp->b_bcount;
		}
		nbp->b_caller1 = NULL;
		--(*count);
		if (bp->bio_flags & BIO_FLAG1) {
			bp->bio_flags &= ~BIO_FLAG1;
			wakeup(bp);
		}
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 *	getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, any waiter sleeping on the parent (see
 *	waitchainbuf()) is woken up, and errors are automatically
 *	propagated to the parent.
 */
static struct buf *
getchainbuf(struct bio *bp, struct vnode *vp, int flags)
{
	struct buf *nbp;
	u_int *count;

	GIANT_REQUIRED;
	nbp = getpbuf(NULL);
	count = (u_int *)&(bp->bio_driver1);

	nbp->b_caller1 = bp;
	++(*count);

	/* Throttle: allow at most 4 children in flight per parent. */
	if (*count > 4)
		waitchainbuf(bp, 4, 0);

	nbp->b_iocmd = bp->bio_cmd;
	nbp->b_ioflags = 0;
	nbp->b_flags = flags;
	nbp->b_rcred = crhold(thread0.td_ucred);
	nbp->b_wcred = crhold(thread0.td_ucred);
	nbp->b_iodone = vm_pager_chain_iodone;

	if (vp)
		pbgetvp(vp, nbp);
	return (nbp);
}

static void
flushchainbuf(struct buf *nbp)
{
	GIANT_REQUIRED;
	if (nbp->b_bcount) {
		nbp->b_bufsize = nbp->b_bcount;
		if (nbp->b_iocmd == BIO_WRITE)
			nbp->b_dirtyend = nbp->b_bcount;
		BUF_KERNPROC(nbp);
		VOP_STRATEGY(nbp->b_vp, nbp);
	} else {
		bufdone(nbp);
	}
}

static void
waitchainbuf(struct bio *bp, int limit, int done)
{
	int s;
	u_int *count;

	GIANT_REQUIRED;
	count = (u_int *)&(bp->bio_driver1);
	s = splbio();
	while (*count > limit) {
		bp->bio_flags |= BIO_FLAG1;
		tsleep(bp, PRIBIO + 4, "bpchain", 0);
	}
	if (done) {
		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
			bp->bio_flags |= BIO_ERROR;
			bp->bio_error = EINVAL;
		}
		biodone(bp);
	}
	splx(s);
}
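/*
 * Usage sketch (hypothetical caller, not compiled): how the chaining
 * helpers above fit together.  A caller splits one logical bio into
 * child pbufs; getchainbuf() throttles to four children in flight,
 * flushchainbuf() starts each child, and waitchainbuf() drains the
 * chain and optionally finalizes the parent with biodone().
 */
#if 0
static void
chained_io_sketch(struct bio *bp, struct vnode *vp)
{
	struct buf *nbp;

	nbp = getchainbuf(bp, vp, B_ASYNC);	/* child chained to bp */

	/* ... fill in nbp->b_data, b_bcount, b_blkno for this chunk ... */

	flushchainbuf(nbp);	/* start I/O; vm_pager_chain_iodone() runs */

	waitchainbuf(bp, 0, 1);	/* wait for all children, biodone(bp) */
}
#endif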