1 /* 2 * Copyright (c) 1994 John S. Dyson 3 * Copyright (c) 1990 University of Utah. 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 40 * 41 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 42 * $Id: swap_pager.c,v 1.8 1994/08/29 06:23:18 davidg Exp $ 43 */ 44 45 /* 46 * Quick hack to page to dedicated partition(s). 47 * TODO: 48 * Add multiprocessor locks 49 * Deal with async writes in a better fashion 50 */ 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/proc.h> 55 #include <sys/buf.h> 56 #include <sys/vnode.h> 57 #include <sys/malloc.h> 58 59 #include <miscfs/specfs/specdev.h> 60 #include <sys/rlist.h> 61 62 #include <vm/vm.h> 63 #include <vm/vm_pager.h> 64 #include <vm/vm_page.h> 65 #include <vm/vm_pageout.h> 66 #include <vm/swap_pager.h> 67 68 #ifndef NPENDINGIO 69 #define NPENDINGIO 16 70 #endif 71 72 int nswiodone; 73 extern int vm_pageout_rate_limit; 74 static int cleandone; 75 extern int hz; 76 int swap_pager_full; 77 extern vm_map_t pager_map; 78 extern int vm_swap_size; 79 80 #define MAX_PAGEOUT_CLUSTER 8 81 82 TAILQ_HEAD(swpclean, swpagerclean); 83 84 typedef struct swpagerclean *swp_clean_t; 85 86 struct swpagerclean { 87 TAILQ_ENTRY(swpagerclean) spc_list; 88 int spc_flags; 89 struct buf *spc_bp; 90 sw_pager_t spc_swp; 91 vm_offset_t spc_kva; 92 int spc_count; 93 vm_page_t spc_m[MAX_PAGEOUT_CLUSTER]; 94 } swcleanlist [NPENDINGIO] ; 95 96 97 extern vm_map_t kernel_map; 98 99 /* spc_flags values */ 100 #define SPC_ERROR 0x01 101 102 #define SWB_EMPTY (-1) 103 104 struct swpclean swap_pager_done; /* list of compileted page cleans */ 105 struct swpclean swap_pager_inuse; /* list of pending page cleans */ 106 struct swpclean swap_pager_free; /* list of free pager clean structs */ 107 struct pagerlst swap_pager_list; /* list of "named" anon regions */ 108 struct pagerlst swap_pager_un_list; /* list of "unnamed" anon pagers */ 109 110 #define SWAP_FREE_NEEDED 0x1 /* need a swap block */ 111 int swap_pager_needflags; 112 struct rlist *swapfrag; 113 114 struct pagerlst *swp_qs[]={ 115 &swap_pager_list, &swap_pager_un_list, (struct pagerlst *) 0 116 }; 117 118 int swap_pager_putmulti(); 119 120 struct pagerops swappagerops = { 121 swap_pager_init, 122 swap_pager_alloc, 123 swap_pager_dealloc, 124 swap_pager_getpage, 125 swap_pager_getmulti, 126 swap_pager_putpage, 127 swap_pager_putmulti, 128 swap_pager_haspage 129 }; 130 131 int npendingio = NPENDINGIO; 132 int pendingiowait; 133 int require_swap_init; 134 void swap_pager_finish(); 135 int dmmin, dmmax; 136 extern int vm_page_count; 137 138 struct buf * getpbuf() ; 139 void relpbuf(struct buf *bp) ; 140 141 static inline void swapsizecheck() { 142 if( vm_swap_size < 128*btodb(PAGE_SIZE)) { 143 if( swap_pager_full) 144 printf("swap_pager: out of space\n"); 145 swap_pager_full = 1; 146 } else if( vm_swap_size > 192*btodb(PAGE_SIZE)) 147 swap_pager_full = 0; 148 } 149 150 void 151 swap_pager_init() 152 { 153 dfltpagerops = &swappagerops; 154 155 TAILQ_INIT(&swap_pager_list); 156 TAILQ_INIT(&swap_pager_un_list); 157 158 /* 159 * Initialize clean lists 160 */ 161 TAILQ_INIT(&swap_pager_inuse); 162 TAILQ_INIT(&swap_pager_done); 163 TAILQ_INIT(&swap_pager_free); 164 165 require_swap_init = 1; 166 167 /* 168 * Calculate the swap allocation constants. 169 */ 170 171 dmmin = CLBYTES/DEV_BSIZE; 172 dmmax = btodb(SWB_NPAGES*PAGE_SIZE)*2; 173 174 } 175 176 /* 177 * Allocate a pager structure and associated resources. 178 * Note that if we are called from the pageout daemon (handle == NULL) 179 * we should not wait for memory as it could resulting in deadlock. 180 */ 181 vm_pager_t 182 swap_pager_alloc(handle, size, prot, offset) 183 caddr_t handle; 184 register vm_size_t size; 185 vm_prot_t prot; 186 vm_offset_t offset; 187 { 188 register vm_pager_t pager; 189 register sw_pager_t swp; 190 int waitok; 191 int i,j; 192 193 if (require_swap_init) { 194 swp_clean_t spc; 195 struct buf *bp; 196 /* 197 * kva's are allocated here so that we dont need to keep 198 * doing kmem_alloc pageables at runtime 199 */ 200 for (i = 0, spc = swcleanlist; i < npendingio ; i++, spc++) { 201 spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE*MAX_PAGEOUT_CLUSTER); 202 if (!spc->spc_kva) { 203 break; 204 } 205 spc->spc_bp = malloc( sizeof( *bp), M_TEMP, M_NOWAIT); 206 if (!spc->spc_bp) { 207 kmem_free_wakeup(pager_map, spc->spc_kva, PAGE_SIZE); 208 break; 209 } 210 spc->spc_flags = 0; 211 TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); 212 } 213 require_swap_init = 0; 214 if( size == 0) 215 return(NULL); 216 } 217 218 /* 219 * If this is a "named" anonymous region, look it up and 220 * return the appropriate pager if it exists. 221 */ 222 if (handle) { 223 pager = vm_pager_lookup(&swap_pager_list, handle); 224 if (pager != NULL) { 225 /* 226 * Use vm_object_lookup to gain a reference 227 * to the object and also to remove from the 228 * object cache. 229 */ 230 if (vm_object_lookup(pager) == NULL) 231 panic("swap_pager_alloc: bad object"); 232 return(pager); 233 } 234 } 235 236 if (swap_pager_full) { 237 return(NULL); 238 } 239 240 /* 241 * Pager doesn't exist, allocate swap management resources 242 * and initialize. 243 */ 244 waitok = handle ? M_WAITOK : M_NOWAIT; 245 pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, waitok); 246 if (pager == NULL) 247 return(NULL); 248 swp = (sw_pager_t)malloc(sizeof *swp, M_VMPGDATA, waitok); 249 if (swp == NULL) { 250 free((caddr_t)pager, M_VMPAGER); 251 return(NULL); 252 } 253 size = round_page(size); 254 swp->sw_osize = size; 255 swp->sw_nblocks = (btodb(size) + btodb(SWB_NPAGES * PAGE_SIZE) - 1) / btodb(SWB_NPAGES*PAGE_SIZE); 256 swp->sw_blocks = (sw_blk_t) 257 malloc(swp->sw_nblocks*sizeof(*swp->sw_blocks), 258 M_VMPGDATA, waitok); 259 if (swp->sw_blocks == NULL) { 260 free((caddr_t)swp, M_VMPGDATA); 261 free((caddr_t)pager, M_VMPAGER); 262 return(NULL); 263 } 264 265 for (i = 0; i < swp->sw_nblocks; i++) { 266 swp->sw_blocks[i].swb_valid = 0; 267 swp->sw_blocks[i].swb_locked = 0; 268 for (j = 0; j < SWB_NPAGES; j++) 269 swp->sw_blocks[i].swb_block[j] = SWB_EMPTY; 270 } 271 272 swp->sw_poip = 0; 273 if (handle) { 274 vm_object_t object; 275 276 swp->sw_flags = SW_NAMED; 277 TAILQ_INSERT_TAIL(&swap_pager_list, pager, pg_list); 278 /* 279 * Consistant with other pagers: return with object 280 * referenced. Can't do this with handle == NULL 281 * since it might be the pageout daemon calling. 282 */ 283 object = vm_object_allocate(size); 284 vm_object_enter(object, pager); 285 vm_object_setpager(object, pager, 0, FALSE); 286 } else { 287 swp->sw_flags = 0; 288 TAILQ_INSERT_TAIL(&swap_pager_un_list, pager, pg_list); 289 } 290 pager->pg_handle = handle; 291 pager->pg_ops = &swappagerops; 292 pager->pg_type = PG_SWAP; 293 pager->pg_data = (caddr_t)swp; 294 295 return(pager); 296 } 297 298 /* 299 * returns disk block associated with pager and offset 300 * additionally, as a side effect returns a flag indicating 301 * if the block has been written 302 */ 303 304 static int * 305 swap_pager_diskaddr(swp, offset, valid) 306 sw_pager_t swp; 307 vm_offset_t offset; 308 int *valid; 309 { 310 register sw_blk_t swb; 311 int ix; 312 313 if (valid) 314 *valid = 0; 315 ix = offset / (SWB_NPAGES*PAGE_SIZE); 316 if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) { 317 return(FALSE); 318 } 319 swb = &swp->sw_blocks[ix]; 320 ix = (offset % (SWB_NPAGES*PAGE_SIZE)) / PAGE_SIZE; 321 if (valid) 322 *valid = swb->swb_valid & (1<<ix); 323 return &swb->swb_block[ix]; 324 } 325 326 /* 327 * Utility routine to set the valid (written) bit for 328 * a block associated with a pager and offset 329 */ 330 static void 331 swap_pager_setvalid(swp, offset, valid) 332 sw_pager_t swp; 333 vm_offset_t offset; 334 int valid; 335 { 336 register sw_blk_t swb; 337 int ix; 338 339 ix = offset / (SWB_NPAGES*PAGE_SIZE); 340 if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) 341 return; 342 343 swb = &swp->sw_blocks[ix]; 344 ix = (offset % (SWB_NPAGES*PAGE_SIZE)) / PAGE_SIZE; 345 if (valid) 346 swb->swb_valid |= (1 << ix); 347 else 348 swb->swb_valid &= ~(1 << ix); 349 return; 350 } 351 352 /* 353 * this routine allocates swap space with a fragmentation 354 * minimization policy. 355 */ 356 int 357 swap_pager_getswapspace( unsigned amount, unsigned *rtval) { 358 #ifdef EXP 359 unsigned tmpalloc; 360 unsigned nblocksfrag = btodb(SWB_NPAGES*PAGE_SIZE); 361 if( amount < nblocksfrag) { 362 if( rlist_alloc(&swapfrag, amount, rtval)) 363 return 1; 364 if( !rlist_alloc(&swapmap, nblocksfrag, &tmpalloc)) 365 return 0; 366 rlist_free( &swapfrag, tmpalloc+amount, tmpalloc + nblocksfrag - 1); 367 *rtval = tmpalloc; 368 return 1; 369 } 370 #endif 371 if( !rlist_alloc(&swapmap, amount, rtval)) 372 return 0; 373 else 374 return 1; 375 } 376 377 /* 378 * this routine frees swap space with a fragmentation 379 * minimization policy. 380 */ 381 void 382 swap_pager_freeswapspace( unsigned from, unsigned to) { 383 unsigned nblocksfrag = btodb(SWB_NPAGES*PAGE_SIZE); 384 unsigned tmpalloc; 385 #ifdef EXP 386 if( ((to + 1) - from) >= nblocksfrag) { 387 #endif 388 rlist_free(&swapmap, from, to); 389 #ifdef EXP 390 return; 391 } 392 rlist_free(&swapfrag, from, to); 393 while( rlist_alloc(&swapfrag, nblocksfrag, &tmpalloc)) { 394 rlist_free(&swapmap, tmpalloc, tmpalloc + nblocksfrag-1); 395 } 396 #endif 397 } 398 /* 399 * this routine frees swap blocks from a specified pager 400 */ 401 void 402 _swap_pager_freespace(swp, start, size) 403 sw_pager_t swp; 404 vm_offset_t start; 405 vm_offset_t size; 406 { 407 vm_offset_t i; 408 int s; 409 410 s = splbio(); 411 for (i = start; i < round_page(start + size - 1); i += PAGE_SIZE) { 412 int valid; 413 int *addr = swap_pager_diskaddr(swp, i, &valid); 414 if (addr && *addr != SWB_EMPTY) { 415 swap_pager_freeswapspace(*addr, *addr+btodb(PAGE_SIZE) - 1); 416 if( valid) { 417 vm_swap_size += btodb(PAGE_SIZE); 418 swap_pager_setvalid(swp, i, 0); 419 } 420 *addr = SWB_EMPTY; 421 } 422 } 423 swapsizecheck(); 424 splx(s); 425 } 426 427 void 428 swap_pager_freespace(pager, start, size) 429 vm_pager_t pager; 430 vm_offset_t start; 431 vm_offset_t size; 432 { 433 _swap_pager_freespace((sw_pager_t) pager->pg_data, start, size); 434 } 435 436 /* 437 * swap_pager_reclaim frees up over-allocated space from all pagers 438 * this eliminates internal fragmentation due to allocation of space 439 * for segments that are never swapped to. It has been written so that 440 * it does not block until the rlist_free operation occurs; it keeps 441 * the queues consistant. 442 */ 443 444 /* 445 * Maximum number of blocks (pages) to reclaim per pass 446 */ 447 #define MAXRECLAIM 256 448 449 void 450 swap_pager_reclaim() 451 { 452 vm_pager_t p; 453 sw_pager_t swp; 454 int i, j, k; 455 int s; 456 int reclaimcount; 457 static int reclaims[MAXRECLAIM]; 458 static int in_reclaim; 459 460 /* 461 * allow only one process to be in the swap_pager_reclaim subroutine 462 */ 463 s = splbio(); 464 if (in_reclaim) { 465 tsleep((caddr_t) &in_reclaim, PSWP, "swrclm", 0); 466 splx(s); 467 return; 468 } 469 in_reclaim = 1; 470 reclaimcount = 0; 471 472 /* for each pager queue */ 473 for (k = 0; swp_qs[k]; k++) { 474 475 p = swp_qs[k]->tqh_first; 476 while (p && (reclaimcount < MAXRECLAIM)) { 477 478 /* 479 * see if any blocks associated with a pager has been 480 * allocated but not used (written) 481 */ 482 swp = (sw_pager_t) p->pg_data; 483 for (i = 0; i < swp->sw_nblocks; i++) { 484 sw_blk_t swb = &swp->sw_blocks[i]; 485 if( swb->swb_locked) 486 continue; 487 for (j = 0; j < SWB_NPAGES; j++) { 488 if (swb->swb_block[j] != SWB_EMPTY && 489 (swb->swb_valid & (1 << j)) == 0) { 490 reclaims[reclaimcount++] = swb->swb_block[j]; 491 swb->swb_block[j] = SWB_EMPTY; 492 if (reclaimcount >= MAXRECLAIM) 493 goto rfinished; 494 } 495 } 496 } 497 p = p->pg_list.tqe_next; 498 } 499 } 500 501 rfinished: 502 503 /* 504 * free the blocks that have been added to the reclaim list 505 */ 506 for (i = 0; i < reclaimcount; i++) { 507 swap_pager_freeswapspace(reclaims[i], reclaims[i]+btodb(PAGE_SIZE) - 1); 508 swapsizecheck(); 509 wakeup((caddr_t) &in_reclaim); 510 } 511 512 splx(s); 513 in_reclaim = 0; 514 wakeup((caddr_t) &in_reclaim); 515 } 516 517 518 /* 519 * swap_pager_copy copies blocks from one pager to another and 520 * destroys the source pager 521 */ 522 523 void 524 swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset) 525 vm_pager_t srcpager; 526 vm_offset_t srcoffset; 527 vm_pager_t dstpager; 528 vm_offset_t dstoffset; 529 vm_offset_t offset; 530 { 531 sw_pager_t srcswp, dstswp; 532 vm_offset_t i; 533 int s; 534 535 srcswp = (sw_pager_t) srcpager->pg_data; 536 dstswp = (sw_pager_t) dstpager->pg_data; 537 538 /* 539 * remove the source pager from the swap_pager internal queue 540 */ 541 s = splbio(); 542 if (srcswp->sw_flags & SW_NAMED) { 543 TAILQ_REMOVE(&swap_pager_list, srcpager, pg_list); 544 srcswp->sw_flags &= ~SW_NAMED; 545 } else { 546 TAILQ_REMOVE(&swap_pager_un_list, srcpager, pg_list); 547 } 548 549 while (srcswp->sw_poip) { 550 tsleep((caddr_t)srcswp, PVM, "spgout", 0); 551 } 552 splx(s); 553 554 /* 555 * clean all of the pages that are currently active and finished 556 */ 557 (void) swap_pager_clean(); 558 559 s = splbio(); 560 /* 561 * clear source block before destination object 562 * (release allocated space) 563 */ 564 for (i = 0; i < offset + srcoffset; i += PAGE_SIZE) { 565 int valid; 566 int *addr = swap_pager_diskaddr(srcswp, i, &valid); 567 if (addr && *addr != SWB_EMPTY) { 568 swap_pager_freeswapspace(*addr, *addr+btodb(PAGE_SIZE) - 1); 569 if( valid) 570 vm_swap_size += btodb(PAGE_SIZE); 571 swapsizecheck(); 572 *addr = SWB_EMPTY; 573 } 574 } 575 /* 576 * transfer source to destination 577 */ 578 for (i = 0; i < dstswp->sw_osize; i += PAGE_SIZE) { 579 int srcvalid, dstvalid; 580 int *srcaddrp = swap_pager_diskaddr(srcswp, i + offset + srcoffset, 581 &srcvalid); 582 int *dstaddrp; 583 /* 584 * see if the source has space allocated 585 */ 586 if (srcaddrp && *srcaddrp != SWB_EMPTY) { 587 /* 588 * if the source is valid and the dest has no space, then 589 * copy the allocation from the srouce to the dest. 590 */ 591 if (srcvalid) { 592 dstaddrp = swap_pager_diskaddr(dstswp, i + dstoffset, &dstvalid); 593 /* 594 * if the dest already has a valid block, deallocate the 595 * source block without copying. 596 */ 597 if (!dstvalid && dstaddrp && *dstaddrp != SWB_EMPTY) { 598 swap_pager_freeswapspace(*dstaddrp, *dstaddrp+btodb(PAGE_SIZE) - 1); 599 *dstaddrp = SWB_EMPTY; 600 } 601 if (dstaddrp && *dstaddrp == SWB_EMPTY) { 602 *dstaddrp = *srcaddrp; 603 *srcaddrp = SWB_EMPTY; 604 swap_pager_setvalid(dstswp, i + dstoffset, 1); 605 vm_swap_size -= btodb(PAGE_SIZE); 606 } 607 } 608 /* 609 * if the source is not empty at this point, then deallocate the space. 610 */ 611 if (*srcaddrp != SWB_EMPTY) { 612 swap_pager_freeswapspace(*srcaddrp, *srcaddrp+btodb(PAGE_SIZE) - 1); 613 if( srcvalid) 614 vm_swap_size += btodb(PAGE_SIZE); 615 *srcaddrp = SWB_EMPTY; 616 } 617 } 618 } 619 620 /* 621 * deallocate the rest of the source object 622 */ 623 for (i = dstswp->sw_osize + offset + srcoffset; i < srcswp->sw_osize; i += PAGE_SIZE) { 624 int valid; 625 int *srcaddrp = swap_pager_diskaddr(srcswp, i, &valid); 626 if (srcaddrp && *srcaddrp != SWB_EMPTY) { 627 swap_pager_freeswapspace(*srcaddrp, *srcaddrp+btodb(PAGE_SIZE) - 1); 628 if( valid) 629 vm_swap_size += btodb(PAGE_SIZE); 630 *srcaddrp = SWB_EMPTY; 631 } 632 } 633 634 swapsizecheck(); 635 splx(s); 636 637 free((caddr_t)srcswp->sw_blocks, M_VMPGDATA); 638 srcswp->sw_blocks = 0; 639 free((caddr_t)srcswp, M_VMPGDATA); 640 srcpager->pg_data = 0; 641 free((caddr_t)srcpager, M_VMPAGER); 642 643 return; 644 } 645 646 647 void 648 swap_pager_dealloc(pager) 649 vm_pager_t pager; 650 { 651 register int i,j; 652 register sw_blk_t bp; 653 register sw_pager_t swp; 654 int s; 655 656 /* 657 * Remove from list right away so lookups will fail if we 658 * block for pageout completion. 659 */ 660 s = splbio(); 661 swp = (sw_pager_t) pager->pg_data; 662 if (swp->sw_flags & SW_NAMED) { 663 TAILQ_REMOVE(&swap_pager_list, pager, pg_list); 664 swp->sw_flags &= ~SW_NAMED; 665 } else { 666 TAILQ_REMOVE(&swap_pager_un_list, pager, pg_list); 667 } 668 /* 669 * Wait for all pageouts to finish and remove 670 * all entries from cleaning list. 671 */ 672 673 while (swp->sw_poip) { 674 tsleep((caddr_t)swp, PVM, "swpout", 0); 675 } 676 splx(s); 677 678 679 (void) swap_pager_clean(); 680 681 /* 682 * Free left over swap blocks 683 */ 684 s = splbio(); 685 for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++) { 686 for (j = 0; j < SWB_NPAGES; j++) 687 if (bp->swb_block[j] != SWB_EMPTY) { 688 swap_pager_freeswapspace((unsigned)bp->swb_block[j], 689 (unsigned)bp->swb_block[j] + btodb(PAGE_SIZE) - 1); 690 if( bp->swb_valid & (1<<j)) 691 vm_swap_size += btodb(PAGE_SIZE); 692 bp->swb_block[j] = SWB_EMPTY; 693 } 694 } 695 splx(s); 696 swapsizecheck(); 697 698 /* 699 * Free swap management resources 700 */ 701 free((caddr_t)swp->sw_blocks, M_VMPGDATA); 702 swp->sw_blocks = 0; 703 free((caddr_t)swp, M_VMPGDATA); 704 pager->pg_data = 0; 705 free((caddr_t)pager, M_VMPAGER); 706 } 707 708 /* 709 * swap_pager_getmulti can get multiple pages. 710 */ 711 int 712 swap_pager_getmulti(pager, m, count, reqpage, sync) 713 vm_pager_t pager; 714 vm_page_t *m; 715 int count; 716 int reqpage; 717 boolean_t sync; 718 { 719 if( reqpage >= count) 720 panic("swap_pager_getmulti: reqpage >= count\n"); 721 return swap_pager_input((sw_pager_t) pager->pg_data, m, count, reqpage); 722 } 723 724 /* 725 * swap_pager_getpage gets individual pages 726 */ 727 int 728 swap_pager_getpage(pager, m, sync) 729 vm_pager_t pager; 730 vm_page_t m; 731 boolean_t sync; 732 { 733 vm_page_t marray[1]; 734 735 marray[0] = m; 736 return swap_pager_input((sw_pager_t)pager->pg_data, marray, 1, 0); 737 } 738 739 int 740 swap_pager_putmulti(pager, m, c, sync, rtvals) 741 vm_pager_t pager; 742 vm_page_t *m; 743 int c; 744 boolean_t sync; 745 int *rtvals; 746 { 747 int flags; 748 749 if (pager == NULL) { 750 (void) swap_pager_clean(); 751 return VM_PAGER_OK; 752 } 753 754 flags = B_WRITE; 755 if (!sync) 756 flags |= B_ASYNC; 757 758 return swap_pager_output((sw_pager_t)pager->pg_data, m, c, flags, rtvals); 759 } 760 761 /* 762 * swap_pager_putpage writes individual pages 763 */ 764 int 765 swap_pager_putpage(pager, m, sync) 766 vm_pager_t pager; 767 vm_page_t m; 768 boolean_t sync; 769 { 770 int flags; 771 vm_page_t marray[1]; 772 int rtvals[1]; 773 774 775 if (pager == NULL) { 776 (void) swap_pager_clean(); 777 return VM_PAGER_OK; 778 } 779 780 marray[0] = m; 781 flags = B_WRITE; 782 if (!sync) 783 flags |= B_ASYNC; 784 785 swap_pager_output((sw_pager_t)pager->pg_data, marray, 1, flags, rtvals); 786 787 return rtvals[0]; 788 } 789 790 static inline int 791 const swap_pager_block_index(swp, offset) 792 sw_pager_t swp; 793 vm_offset_t offset; 794 { 795 return (offset / (SWB_NPAGES*PAGE_SIZE)); 796 } 797 798 static inline int 799 const swap_pager_block_offset(swp, offset) 800 sw_pager_t swp; 801 vm_offset_t offset; 802 { 803 return ((offset % (PAGE_SIZE*SWB_NPAGES)) / PAGE_SIZE); 804 } 805 806 /* 807 * _swap_pager_haspage returns TRUE if the pager has data that has 808 * been written out. 809 */ 810 static boolean_t 811 _swap_pager_haspage(swp, offset) 812 sw_pager_t swp; 813 vm_offset_t offset; 814 { 815 register sw_blk_t swb; 816 int ix; 817 818 ix = offset / (SWB_NPAGES*PAGE_SIZE); 819 if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) { 820 return(FALSE); 821 } 822 swb = &swp->sw_blocks[ix]; 823 ix = (offset % (SWB_NPAGES*PAGE_SIZE)) / PAGE_SIZE; 824 if (swb->swb_block[ix] != SWB_EMPTY) { 825 if (swb->swb_valid & (1 << ix)) 826 return TRUE; 827 } 828 829 return(FALSE); 830 } 831 832 /* 833 * swap_pager_haspage is the externally accessible version of 834 * _swap_pager_haspage above. this routine takes a vm_pager_t 835 * for an argument instead of sw_pager_t. 836 */ 837 boolean_t 838 swap_pager_haspage(pager, offset) 839 vm_pager_t pager; 840 vm_offset_t offset; 841 { 842 return _swap_pager_haspage((sw_pager_t) pager->pg_data, offset); 843 } 844 845 /* 846 * swap_pager_freepage is a convienience routine that clears the busy 847 * bit and deallocates a page. 848 */ 849 static void 850 swap_pager_freepage(m) 851 vm_page_t m; 852 { 853 PAGE_WAKEUP(m); 854 vm_page_free(m); 855 } 856 857 /* 858 * swap_pager_ridpages is a convienience routine that deallocates all 859 * but the required page. this is usually used in error returns that 860 * need to invalidate the "extra" readahead pages. 861 */ 862 static void 863 swap_pager_ridpages(m, count, reqpage) 864 vm_page_t *m; 865 int count; 866 int reqpage; 867 { 868 int i; 869 for (i = 0; i < count; i++) 870 if (i != reqpage) 871 swap_pager_freepage(m[i]); 872 } 873 874 int swapwritecount=0; 875 876 /* 877 * swap_pager_iodone1 is the completion routine for both reads and async writes 878 */ 879 void 880 swap_pager_iodone1(bp) 881 struct buf *bp; 882 { 883 bp->b_flags |= B_DONE; 884 bp->b_flags &= ~B_ASYNC; 885 wakeup((caddr_t)bp); 886 /* 887 if ((bp->b_flags & B_READ) == 0) 888 vwakeup(bp); 889 */ 890 } 891 892 893 int 894 swap_pager_input(swp, m, count, reqpage) 895 register sw_pager_t swp; 896 vm_page_t *m; 897 int count, reqpage; 898 { 899 register struct buf *bp; 900 sw_blk_t swb[count]; 901 register int s; 902 int i; 903 boolean_t rv; 904 vm_offset_t kva, off[count]; 905 swp_clean_t spc; 906 vm_offset_t paging_offset; 907 vm_object_t object; 908 int reqaddr[count]; 909 910 int first, last; 911 int failed; 912 int reqdskregion; 913 914 object = m[reqpage]->object; 915 paging_offset = object->paging_offset; 916 /* 917 * First determine if the page exists in the pager if this is 918 * a sync read. This quickly handles cases where we are 919 * following shadow chains looking for the top level object 920 * with the page. 921 */ 922 if (swp->sw_blocks == NULL) { 923 swap_pager_ridpages(m, count, reqpage); 924 return(VM_PAGER_FAIL); 925 } 926 927 for(i = 0; i < count; i++) { 928 vm_offset_t foff = m[i]->offset + paging_offset; 929 int ix = swap_pager_block_index(swp, foff); 930 if (ix >= swp->sw_nblocks) { 931 int j; 932 if( i <= reqpage) { 933 swap_pager_ridpages(m, count, reqpage); 934 return(VM_PAGER_FAIL); 935 } 936 for(j = i; j < count; j++) { 937 swap_pager_freepage(m[j]); 938 } 939 count = i; 940 break; 941 } 942 943 swb[i] = &swp->sw_blocks[ix]; 944 off[i] = swap_pager_block_offset(swp, foff); 945 reqaddr[i] = swb[i]->swb_block[off[i]]; 946 } 947 948 /* make sure that our required input request is existant */ 949 950 if (reqaddr[reqpage] == SWB_EMPTY || 951 (swb[reqpage]->swb_valid & (1 << off[reqpage])) == 0) { 952 swap_pager_ridpages(m, count, reqpage); 953 return(VM_PAGER_FAIL); 954 } 955 956 957 reqdskregion = reqaddr[reqpage] / dmmax; 958 959 /* 960 * search backwards for the first contiguous page to transfer 961 */ 962 failed = 0; 963 first = 0; 964 for (i = reqpage - 1; i >= 0; --i) { 965 if ( failed || (reqaddr[i] == SWB_EMPTY) || 966 (swb[i]->swb_valid & (1 << off[i])) == 0 || 967 (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) || 968 ((reqaddr[i] / dmmax) != reqdskregion)) { 969 failed = 1; 970 swap_pager_freepage(m[i]); 971 if (first == 0) 972 first = i + 1; 973 } 974 } 975 /* 976 * search forwards for the last contiguous page to transfer 977 */ 978 failed = 0; 979 last = count; 980 for (i = reqpage + 1; i < count; i++) { 981 if ( failed || (reqaddr[i] == SWB_EMPTY) || 982 (swb[i]->swb_valid & (1 << off[i])) == 0 || 983 (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) || 984 ((reqaddr[i] / dmmax) != reqdskregion)) { 985 failed = 1; 986 swap_pager_freepage(m[i]); 987 if (last == count) 988 last = i; 989 } 990 } 991 992 count = last; 993 if (first != 0) { 994 for (i = first; i < count; i++) { 995 m[i-first] = m[i]; 996 reqaddr[i-first] = reqaddr[i]; 997 off[i-first] = off[i]; 998 } 999 count -= first; 1000 reqpage -= first; 1001 } 1002 1003 ++swb[reqpage]->swb_locked; 1004 1005 /* 1006 * at this point: 1007 * "m" is a pointer to the array of vm_page_t for paging I/O 1008 * "count" is the number of vm_page_t entries represented by "m" 1009 * "object" is the vm_object_t for I/O 1010 * "reqpage" is the index into "m" for the page actually faulted 1011 */ 1012 1013 spc = NULL; /* we might not use an spc data structure */ 1014 1015 if (count == 1) { 1016 /* 1017 * if a kva has not been allocated, we can only do a one page transfer, 1018 * so we free the other pages that might have been allocated by 1019 * vm_fault. 1020 */ 1021 swap_pager_ridpages(m, count, reqpage); 1022 m[0] = m[reqpage]; 1023 reqaddr[0] = reqaddr[reqpage]; 1024 1025 count = 1; 1026 reqpage = 0; 1027 /* 1028 * get a swap pager clean data structure, block until we get it 1029 */ 1030 if (swap_pager_free.tqh_first == NULL) { 1031 s = splbio(); 1032 if( curproc == pageproc) 1033 (void) swap_pager_clean(); 1034 else 1035 wakeup((caddr_t) &vm_pages_needed); 1036 while (swap_pager_free.tqh_first == NULL) { 1037 swap_pager_needflags |= SWAP_FREE_NEEDED; 1038 tsleep((caddr_t)&swap_pager_free, 1039 PVM, "swpfre", 0); 1040 if( curproc == pageproc) 1041 (void) swap_pager_clean(); 1042 else 1043 wakeup((caddr_t) &vm_pages_needed); 1044 } 1045 splx(s); 1046 } 1047 spc = swap_pager_free.tqh_first; 1048 TAILQ_REMOVE(&swap_pager_free, spc, spc_list); 1049 kva = spc->spc_kva; 1050 bp = spc->spc_bp; 1051 bzero(bp, sizeof *bp); 1052 bp->b_spc = spc; 1053 } else { 1054 /* 1055 * Get a swap buffer header to perform the IO 1056 */ 1057 bp = getpbuf(); 1058 kva = (vm_offset_t) bp->b_data; 1059 } 1060 1061 /* 1062 * map our page(s) into kva for input 1063 */ 1064 pmap_qenter( kva, m, count); 1065 1066 s = splbio(); 1067 bp->b_flags = B_BUSY | B_READ | B_CALL; 1068 bp->b_iodone = swap_pager_iodone1; 1069 bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */ 1070 bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; 1071 crhold(bp->b_rcred); 1072 crhold(bp->b_wcred); 1073 bp->b_un.b_addr = (caddr_t) kva; 1074 bp->b_blkno = reqaddr[0]; 1075 bp->b_bcount = PAGE_SIZE*count; 1076 bp->b_bufsize = PAGE_SIZE*count; 1077 1078 bgetvp( swapdev_vp, bp); 1079 1080 swp->sw_piip++; 1081 1082 /* 1083 * perform the I/O 1084 */ 1085 VOP_STRATEGY(bp); 1086 1087 /* 1088 * wait for the sync I/O to complete 1089 */ 1090 while ((bp->b_flags & B_DONE) == 0) { 1091 tsleep((caddr_t)bp, PVM, "swread", 0); 1092 } 1093 rv = (bp->b_flags & B_ERROR) ? VM_PAGER_FAIL : VM_PAGER_OK; 1094 bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY|B_CALL|B_DONE); 1095 1096 --swp->sw_piip; 1097 if (swp->sw_piip == 0) 1098 wakeup((caddr_t) swp); 1099 1100 /* 1101 * relpbuf does this, but we maintain our own buffer 1102 * list also... 1103 */ 1104 if (bp->b_vp) 1105 brelvp(bp); 1106 1107 splx(s); 1108 --swb[reqpage]->swb_locked; 1109 1110 /* 1111 * remove the mapping for kernel virtual 1112 */ 1113 pmap_qremove( kva, count); 1114 1115 if (spc) { 1116 /* 1117 * if we have used an spc, we need to free it. 1118 */ 1119 if( bp->b_rcred != NOCRED) 1120 crfree(bp->b_rcred); 1121 if( bp->b_wcred != NOCRED) 1122 crfree(bp->b_wcred); 1123 TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); 1124 if (swap_pager_needflags & SWAP_FREE_NEEDED) { 1125 swap_pager_needflags &= ~SWAP_FREE_NEEDED; 1126 wakeup((caddr_t)&swap_pager_free); 1127 } 1128 } else { 1129 /* 1130 * release the physical I/O buffer 1131 */ 1132 relpbuf(bp); 1133 /* 1134 * finish up input if everything is ok 1135 */ 1136 if( rv == VM_PAGER_OK) { 1137 for (i = 0; i < count; i++) { 1138 pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); 1139 m[i]->flags |= PG_CLEAN; 1140 m[i]->flags &= ~PG_LAUNDRY; 1141 if (i != reqpage) { 1142 /* 1143 * whether or not to leave the page activated 1144 * is up in the air, but we should put the page 1145 * on a page queue somewhere. (it already is in 1146 * the object). 1147 * After some emperical results, it is best 1148 * to deactivate the readahead pages. 1149 */ 1150 vm_page_deactivate(m[i]); 1151 1152 /* 1153 * just in case someone was asking for this 1154 * page we now tell them that it is ok to use 1155 */ 1156 m[i]->flags &= ~PG_FAKE; 1157 PAGE_WAKEUP(m[i]); 1158 } 1159 } 1160 if( swap_pager_full) { 1161 _swap_pager_freespace( swp, m[0]->offset+paging_offset, count*PAGE_SIZE); 1162 } 1163 } else { 1164 swap_pager_ridpages(m, count, reqpage); 1165 } 1166 } 1167 return(rv); 1168 } 1169 1170 int 1171 swap_pager_output(swp, m, count, flags, rtvals) 1172 register sw_pager_t swp; 1173 vm_page_t *m; 1174 int count; 1175 int flags; 1176 int *rtvals; 1177 { 1178 register struct buf *bp; 1179 sw_blk_t swb[count]; 1180 register int s; 1181 int i, j, ix; 1182 boolean_t rv; 1183 vm_offset_t kva, off, foff; 1184 swp_clean_t spc; 1185 vm_offset_t paging_offset; 1186 vm_object_t object; 1187 int reqaddr[count]; 1188 int failed; 1189 1190 /* 1191 if( count > 1) 1192 printf("off: 0x%x, count: %d\n", m[0]->offset, count); 1193 */ 1194 spc = NULL; 1195 1196 object = m[0]->object; 1197 paging_offset = object->paging_offset; 1198 1199 failed = 0; 1200 for(j=0;j<count;j++) { 1201 foff = m[j]->offset + paging_offset; 1202 ix = swap_pager_block_index(swp, foff); 1203 swb[j] = 0; 1204 if( swp->sw_blocks == NULL || ix >= swp->sw_nblocks) { 1205 rtvals[j] = VM_PAGER_FAIL; 1206 failed = 1; 1207 continue; 1208 } else { 1209 rtvals[j] = VM_PAGER_OK; 1210 } 1211 swb[j] = &swp->sw_blocks[ix]; 1212 ++swb[j]->swb_locked; 1213 if( failed) { 1214 rtvals[j] = VM_PAGER_FAIL; 1215 continue; 1216 } 1217 off = swap_pager_block_offset(swp, foff); 1218 reqaddr[j] = swb[j]->swb_block[off]; 1219 if( reqaddr[j] == SWB_EMPTY) { 1220 int blk; 1221 int tries; 1222 int ntoget; 1223 tries = 0; 1224 s = splbio(); 1225 1226 /* 1227 * if any other pages have been allocated in this block, we 1228 * only try to get one page. 1229 */ 1230 for (i = 0; i < SWB_NPAGES; i++) { 1231 if (swb[j]->swb_block[i] != SWB_EMPTY) 1232 break; 1233 } 1234 1235 1236 ntoget = (i == SWB_NPAGES) ? SWB_NPAGES : 1; 1237 /* 1238 * this code is alittle conservative, but works 1239 * (the intent of this code is to allocate small chunks 1240 * for small objects) 1241 */ 1242 if( (m[j]->offset == 0) && (ntoget*PAGE_SIZE > object->size)) { 1243 ntoget = (object->size + (PAGE_SIZE-1))/PAGE_SIZE; 1244 } 1245 1246 retrygetspace: 1247 if (!swap_pager_full && ntoget > 1 && 1248 swap_pager_getswapspace(ntoget * btodb(PAGE_SIZE), &blk)) { 1249 1250 for (i = 0; i < ntoget; i++) { 1251 swb[j]->swb_block[i] = blk + btodb(PAGE_SIZE) * i; 1252 swb[j]->swb_valid = 0; 1253 } 1254 1255 reqaddr[j] = swb[j]->swb_block[off]; 1256 } else if (!swap_pager_getswapspace(btodb(PAGE_SIZE), 1257 &swb[j]->swb_block[off])) { 1258 /* 1259 * if the allocation has failed, we try to reclaim space and 1260 * retry. 1261 */ 1262 if (++tries == 1) { 1263 swap_pager_reclaim(); 1264 goto retrygetspace; 1265 } 1266 rtvals[j] = VM_PAGER_AGAIN; 1267 failed = 1; 1268 } else { 1269 reqaddr[j] = swb[j]->swb_block[off]; 1270 swb[j]->swb_valid &= ~(1<<off); 1271 } 1272 splx(s); 1273 } 1274 } 1275 1276 /* 1277 * search forwards for the last contiguous page to transfer 1278 */ 1279 failed = 0; 1280 for (i = 0; i < count; i++) { 1281 if( failed || (reqaddr[i] != reqaddr[0] + i*btodb(PAGE_SIZE)) || 1282 (reqaddr[i] / dmmax) != (reqaddr[0] / dmmax) || 1283 (rtvals[i] != VM_PAGER_OK)) { 1284 failed = 1; 1285 if( rtvals[i] == VM_PAGER_OK) 1286 rtvals[i] = VM_PAGER_AGAIN; 1287 } 1288 } 1289 1290 for(i = 0; i < count; i++) { 1291 if( rtvals[i] != VM_PAGER_OK) { 1292 if( swb[i]) 1293 --swb[i]->swb_locked; 1294 } 1295 } 1296 1297 for(i = 0; i < count; i++) 1298 if( rtvals[i] != VM_PAGER_OK) 1299 break; 1300 1301 if( i == 0) { 1302 return VM_PAGER_AGAIN; 1303 } 1304 1305 count = i; 1306 for(i=0;i<count;i++) { 1307 if( reqaddr[i] == SWB_EMPTY) 1308 printf("I/O to empty block????\n"); 1309 } 1310 1311 /* 1312 */ 1313 1314 /* 1315 * For synchronous writes, we clean up 1316 * all completed async pageouts. 1317 */ 1318 if ((flags & B_ASYNC) == 0) { 1319 swap_pager_clean(); 1320 } 1321 1322 kva = 0; 1323 1324 /* 1325 * we allocate a new kva for transfers > 1 page 1326 * but for transfers == 1 page, the swap_pager_free list contains 1327 * entries that have pre-allocated kva's (for efficiency). 1328 * NOTE -- we do not use the physical buffer pool or the 1329 * preallocated associated kva's because of the potential for 1330 * deadlock. This is very subtile -- but deadlocks or resource 1331 * contention must be avoided on pageouts -- or your system will 1332 * sleep (forever) !!! 1333 */ 1334 /* 1335 if ( count > 1) { 1336 kva = kmem_alloc_pageable(pager_map, count*PAGE_SIZE); 1337 if( !kva) { 1338 for (i = 0; i < count; i++) { 1339 if( swb[i]) 1340 --swb[i]->swb_locked; 1341 rtvals[i] = VM_PAGER_AGAIN; 1342 } 1343 return VM_PAGER_AGAIN; 1344 } 1345 } 1346 */ 1347 1348 /* 1349 * get a swap pager clean data structure, block until we get it 1350 */ 1351 if (swap_pager_free.tqh_first == NULL) { 1352 s = splbio(); 1353 if( curproc == pageproc) 1354 (void) swap_pager_clean(); 1355 else 1356 wakeup((caddr_t) &vm_pages_needed); 1357 while (swap_pager_free.tqh_first == NULL) { 1358 swap_pager_needflags |= SWAP_FREE_NEEDED; 1359 tsleep((caddr_t)&swap_pager_free, 1360 PVM, "swpfre", 0); 1361 if( curproc == pageproc) 1362 (void) swap_pager_clean(); 1363 else 1364 wakeup((caddr_t) &vm_pages_needed); 1365 } 1366 splx(s); 1367 } 1368 1369 spc = swap_pager_free.tqh_first; 1370 TAILQ_REMOVE(&swap_pager_free, spc, spc_list); 1371 1372 kva = spc->spc_kva; 1373 1374 /* 1375 * map our page(s) into kva for I/O 1376 */ 1377 pmap_qenter(kva, m, count); 1378 1379 /* 1380 * get the base I/O offset into the swap file 1381 */ 1382 for(i=0;i<count;i++) { 1383 foff = m[i]->offset + paging_offset; 1384 off = swap_pager_block_offset(swp, foff); 1385 /* 1386 * if we are setting the valid bit anew, 1387 * then diminish the swap free space 1388 */ 1389 if( (swb[i]->swb_valid & (1 << off)) == 0) 1390 vm_swap_size -= btodb(PAGE_SIZE); 1391 1392 /* 1393 * set the valid bit 1394 */ 1395 swb[i]->swb_valid |= (1 << off); 1396 /* 1397 * and unlock the data structure 1398 */ 1399 --swb[i]->swb_locked; 1400 } 1401 1402 s = splbio(); 1403 /* 1404 * Get a swap buffer header and perform the IO 1405 */ 1406 bp = spc->spc_bp; 1407 bzero(bp, sizeof *bp); 1408 bp->b_spc = spc; 1409 1410 bp->b_flags = B_BUSY; 1411 bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */ 1412 bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; 1413 if( bp->b_rcred != NOCRED) 1414 crhold(bp->b_rcred); 1415 if( bp->b_wcred != NOCRED) 1416 crhold(bp->b_wcred); 1417 bp->b_data = (caddr_t) kva; 1418 bp->b_blkno = reqaddr[0]; 1419 bgetvp( swapdev_vp, bp); 1420 1421 bp->b_bcount = PAGE_SIZE*count; 1422 bp->b_bufsize = PAGE_SIZE*count; 1423 swapdev_vp->v_numoutput++; 1424 1425 /* 1426 * If this is an async write we set up additional buffer fields 1427 * and place a "cleaning" entry on the inuse queue. 1428 */ 1429 if ( flags & B_ASYNC ) { 1430 spc->spc_flags = 0; 1431 spc->spc_swp = swp; 1432 for(i=0;i<count;i++) 1433 spc->spc_m[i] = m[i]; 1434 spc->spc_count = count; 1435 /* 1436 * the completion routine for async writes 1437 */ 1438 bp->b_flags |= B_CALL; 1439 bp->b_iodone = swap_pager_iodone; 1440 bp->b_dirtyoff = 0; 1441 bp->b_dirtyend = bp->b_bcount; 1442 swp->sw_poip++; 1443 TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list); 1444 } else { 1445 swp->sw_poip++; 1446 bp->b_flags |= B_CALL; 1447 bp->b_iodone = swap_pager_iodone1; 1448 } 1449 /* 1450 * perform the I/O 1451 */ 1452 VOP_STRATEGY(bp); 1453 if ((flags & (B_READ|B_ASYNC)) == B_ASYNC ) { 1454 if ((bp->b_flags & B_DONE) == B_DONE) { 1455 swap_pager_clean(); 1456 } 1457 splx(s); 1458 for(i=0;i<count;i++) { 1459 rtvals[i] = VM_PAGER_PEND; 1460 } 1461 return VM_PAGER_PEND; 1462 } 1463 1464 /* 1465 * wait for the sync I/O to complete 1466 */ 1467 while ((bp->b_flags & B_DONE) == 0) { 1468 tsleep((caddr_t)bp, PVM, "swwrt", 0); 1469 } 1470 rv = (bp->b_flags & B_ERROR) ? VM_PAGER_FAIL : VM_PAGER_OK; 1471 bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY|B_CALL|B_DONE); 1472 1473 --swp->sw_poip; 1474 if (swp->sw_poip == 0) 1475 wakeup((caddr_t) swp); 1476 1477 if (bp->b_vp) 1478 brelvp(bp); 1479 1480 splx(s); 1481 1482 /* 1483 * remove the mapping for kernel virtual 1484 */ 1485 pmap_qremove( kva, count); 1486 1487 /* 1488 * if we have written the page, then indicate that the page 1489 * is clean. 1490 */ 1491 if (rv == VM_PAGER_OK) { 1492 for(i=0;i<count;i++) { 1493 if( rtvals[i] == VM_PAGER_OK) { 1494 m[i]->flags |= PG_CLEAN; 1495 m[i]->flags &= ~PG_LAUNDRY; 1496 pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); 1497 /* 1498 * optimization, if a page has been read during the 1499 * pageout process, we activate it. 1500 */ 1501 if ( (m[i]->flags & PG_ACTIVE) == 0 && 1502 pmap_is_referenced(VM_PAGE_TO_PHYS(m[i]))) 1503 vm_page_activate(m[i]); 1504 } 1505 } 1506 } else { 1507 for(i=0;i<count;i++) { 1508 rtvals[i] = rv; 1509 m[i]->flags |= PG_LAUNDRY; 1510 } 1511 } 1512 1513 if( bp->b_rcred != NOCRED) 1514 crfree(bp->b_rcred); 1515 if( bp->b_wcred != NOCRED) 1516 crfree(bp->b_wcred); 1517 TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); 1518 if (swap_pager_needflags & SWAP_FREE_NEEDED) { 1519 swap_pager_needflags &= ~SWAP_FREE_NEEDED; 1520 wakeup((caddr_t)&swap_pager_free); 1521 } 1522 1523 return(rv); 1524 } 1525 1526 boolean_t 1527 swap_pager_clean() 1528 { 1529 register swp_clean_t spc, tspc; 1530 register int s; 1531 1532 tspc = NULL; 1533 if (swap_pager_done.tqh_first == NULL) 1534 return FALSE; 1535 for (;;) { 1536 s = splbio(); 1537 /* 1538 * Look up and removal from done list must be done 1539 * at splbio() to avoid conflicts with swap_pager_iodone. 1540 */ 1541 while (spc = swap_pager_done.tqh_first) { 1542 pmap_qremove( spc->spc_kva, spc->spc_count); 1543 swap_pager_finish(spc); 1544 TAILQ_REMOVE(&swap_pager_done, spc, spc_list); 1545 goto doclean; 1546 } 1547 1548 /* 1549 * No operations done, thats all we can do for now. 1550 */ 1551 1552 splx(s); 1553 break; 1554 1555 /* 1556 * The desired page was found to be busy earlier in 1557 * the scan but has since completed. 1558 */ 1559 doclean: 1560 if (tspc && tspc == spc) { 1561 tspc = NULL; 1562 } 1563 spc->spc_flags = 0; 1564 TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list); 1565 if (swap_pager_needflags & SWAP_FREE_NEEDED) { 1566 swap_pager_needflags &= ~SWAP_FREE_NEEDED; 1567 wakeup((caddr_t)&swap_pager_free); 1568 } 1569 ++cleandone; 1570 splx(s); 1571 } 1572 1573 return(tspc ? TRUE : FALSE); 1574 } 1575 1576 void 1577 swap_pager_finish(spc) 1578 register swp_clean_t spc; 1579 { 1580 vm_object_t object = spc->spc_m[0]->object; 1581 int i; 1582 1583 if ((object->paging_in_progress -= spc->spc_count) == 0) 1584 thread_wakeup((int) object); 1585 1586 /* 1587 * If no error mark as clean and inform the pmap system. 1588 * If error, mark as dirty so we will try again. 1589 * (XXX could get stuck doing this, should give up after awhile) 1590 */ 1591 if (spc->spc_flags & SPC_ERROR) { 1592 for(i=0;i<spc->spc_count;i++) { 1593 printf("swap_pager_finish: clean of page %x failed\n", 1594 VM_PAGE_TO_PHYS(spc->spc_m[i])); 1595 spc->spc_m[i]->flags |= PG_LAUNDRY; 1596 } 1597 } else { 1598 for(i=0;i<spc->spc_count;i++) { 1599 pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i])); 1600 spc->spc_m[i]->flags |= PG_CLEAN; 1601 } 1602 } 1603 1604 1605 for(i=0;i<spc->spc_count;i++) { 1606 /* 1607 * we wakeup any processes that are waiting on 1608 * these pages. 1609 */ 1610 PAGE_WAKEUP(spc->spc_m[i]); 1611 } 1612 nswiodone -= spc->spc_count; 1613 1614 return; 1615 } 1616 1617 /* 1618 * swap_pager_iodone 1619 */ 1620 void 1621 swap_pager_iodone(bp) 1622 register struct buf *bp; 1623 { 1624 register swp_clean_t spc; 1625 int s; 1626 1627 s = splbio(); 1628 spc = (swp_clean_t) bp->b_spc; 1629 TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list); 1630 TAILQ_INSERT_TAIL(&swap_pager_done, spc, spc_list); 1631 if (bp->b_flags & B_ERROR) { 1632 spc->spc_flags |= SPC_ERROR; 1633 printf("error %d blkno %d sz %d ", 1634 bp->b_error, bp->b_blkno, bp->b_bcount); 1635 } 1636 1637 /* 1638 if ((bp->b_flags & B_READ) == 0) 1639 vwakeup(bp); 1640 */ 1641 1642 bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY|B_ASYNC); 1643 if (bp->b_vp) { 1644 brelvp(bp); 1645 } 1646 if( bp->b_rcred != NOCRED) 1647 crfree(bp->b_rcred); 1648 if( bp->b_wcred != NOCRED) 1649 crfree(bp->b_wcred); 1650 1651 nswiodone += spc->spc_count; 1652 if (--spc->spc_swp->sw_poip == 0) { 1653 wakeup((caddr_t)spc->spc_swp); 1654 } 1655 1656 if ((swap_pager_needflags & SWAP_FREE_NEEDED) || 1657 swap_pager_inuse.tqh_first == 0) { 1658 swap_pager_needflags &= ~SWAP_FREE_NEEDED; 1659 wakeup((caddr_t)&swap_pager_free); 1660 wakeup((caddr_t)&vm_pages_needed); 1661 } 1662 1663 if (vm_pageout_pages_needed) { 1664 wakeup((caddr_t)&vm_pageout_pages_needed); 1665 } 1666 1667 if ((swap_pager_inuse.tqh_first == NULL) || 1668 (cnt.v_free_count < cnt.v_free_min && 1669 nswiodone + cnt.v_free_count >= cnt.v_free_min) ) { 1670 wakeup((caddr_t)&vm_pages_needed); 1671 } 1672 splx(s); 1673 } 1674 1675 /* 1676 * return true if any swap control structures can be allocated 1677 */ 1678 int 1679 swap_pager_ready() { 1680 if( swap_pager_free.tqh_first) 1681 return 1; 1682 else 1683 return 0; 1684 } 1685