/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.7 1994/08/06 09:15:39 davidg Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

extern vm_map_t kmem_map;
int	vm_pages_needed;		/* Event on which pageout daemon sleeps */
int	vm_pagescanner;			/* Event on which pagescanner sleeps */
int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */

int	vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */
int	vm_page_pagesfreed;

extern int npendingio;
extern int hz;
int	vm_pageout_proc_limit;
extern int nswiodone;
extern int swap_pager_full;
extern int swap_pager_ready();

#define MAXREF 32767

#define MAXSCAN 512	/* maximum number of pages to scan in active queue */
			/* set the "clock" hands to be (MAXSCAN * 4096) Bytes */
#define ACT_DECLINE	1
#define ACT_ADVANCE	3
#define ACT_MAX		300

#define LOWATER ((2048*1024)/NBPG)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
static vm_offset_t vm_space_needed;
int vm_pageout_req_do_stats;

int vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */


/*
 *	vm_pageout_clean:
 *	cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
	register vm_page_t m;
	int sync;
{
	/*
	 * Clean the page and remove it from the
	 * laundry.
	 *
	 * We set the busy bit to cause
	 * potential page faults on this page to
	 * block.
	 *
	 * And we set pageout-in-progress to keep
	 * the object from disappearing during
	 * pageout.  This guarantees that the
	 * page won't move from the inactive
	 * queue.  (However, any other page on
	 * the inactive queue may move!)
	 */

	register vm_object_t object;
	register vm_pager_t pager;
	int pageout_status[VM_PAGEOUT_PAGE_COUNT];
	vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
	int pageout_count;
	int anyok = 0;
	int i;
	vm_offset_t offset = m->offset;

	object = m->object;
	if (!object) {
		printf("pager: object missing\n");
		return 0;
	}

	/*
	 * Try to collapse the object before
	 * making a pager for it.  We must
	 * unlock the page queues first.
	 * We try to defer the creation of a pager
	 * until all shadows are not paging.  This
	 * allows vm_object_collapse to work better and
	 * helps control swap space size.
	 * (J. Dyson 11 Nov 93)
	 */
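	/*
	 * Overview of what follows: bail out if starting a pageout is
	 * unsafe or pointless right now, then try to gather a cluster of
	 * up to vm_pageout_page_count adjacent inactive dirty pages, mark
	 * them busy and write-protect them, and hand the whole cluster to
	 * the pager in one request.
	 */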

	if (!object->pager &&
	    cnt.v_free_count < vm_pageout_free_min)
		return 0;

	if (!object->pager &&
	    object->shadow &&
	    object->shadow->paging_in_progress)
		return 0;

	if (!sync) {
		if (object->shadow) {
			vm_object_collapse(object);
			if (!vm_page_lookup(object, offset))
				return 0;
		}

		if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
			return 0;
		}
	}

	pageout_count = 1;
	ms[0] = m;

	if ((pager = object->pager) != NULL) {
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((ms[i] = vm_page_lookup(object, offset + i * NBPG)) != NULL) {
				if ((((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE)
				    || ((ms[i]->flags & PG_CLEAN) == 0 && sync == VM_PAGEOUT_FORCE))
				    && (ms[i]->wire_count == 0)
				    && (ms[i]->hold_count == 0))
					pageout_count++;
				else
					break;
			} else
				break;
		}
		for (i = 0; i < pageout_count; i++) {
			ms[i]->flags |= PG_BUSY;
			pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
		}
		object->paging_in_progress += pageout_count;
		cnt.v_pageouts += pageout_count;
	} else {

		m->flags |= PG_BUSY;

		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

		cnt.v_pageouts++;

		object->paging_in_progress++;

		pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
		    object->size, VM_PROT_ALL, 0);
		if (pager != NULL) {
			vm_object_setpager(object, pager, 0, FALSE);
		}
	}

	/*
	 * If there is no pager for the page,
	 * use the default pager.  If there's
	 * no place to put the page at the
	 * moment, leave it in the laundry and
	 * hope that there will be paging space
	 * later.
	 */

	if ((pager && pager->pg_type == PG_SWAP) ||
	    cnt.v_free_count >= vm_pageout_free_min) {
		if (pageout_count == 1) {
			pageout_status[0] = pager ?
			    vm_pager_put(pager, m,
				((sync || (object == kernel_object)) ? TRUE : FALSE)) :
			    VM_PAGER_FAIL;
		} else {
			if (!pager) {
				for (i = 0; i < pageout_count; i++)
					pageout_status[i] = VM_PAGER_FAIL;
			} else {
				vm_pager_put_pages(pager, ms, pageout_count,
				    ((sync || (object == kernel_object)) ? TRUE : FALSE),
				    pageout_status);
			}
		}

	} else {
		for (i = 0; i < pageout_count; i++)
			pageout_status[i] = VM_PAGER_FAIL;
	}

	for (i = 0; i < pageout_count; i++) {
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			ms[i]->flags &= ~PG_LAUNDRY;
			++anyok;
			break;
		case VM_PAGER_PEND:
			ms[i]->flags &= ~PG_LAUNDRY;
			++anyok;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.
			 * Right now we essentially lose the
			 * changes by pretending it worked.
			 */
			ms[i]->flags &= ~PG_LAUNDRY;
			ms[i]->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then
			 * reactivate it so it doesn't clog the
			 * inactive list.  (We will try paging it
			 * out again later.)
			 */
			if (ms[i]->flags & PG_INACTIVE)
				vm_page_activate(ms[i]);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave
		 * the page busy to block all other accesses.
		 * Also, leave the paging in progress
		 * indicator set so that we don't attempt an
		 * object collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			PAGE_WAKEUP(ms[i]);
			if (--object->paging_in_progress == 0)
				wakeup((caddr_t) object);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
				if (ms[i]->flags & PG_INACTIVE)
					vm_page_activate(ms[i]);
			}
		}
	}
	return anyok;
}
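
/*
 * Note: vm_pageout_clean returns the number of pages it managed to
 * queue or write (anyok); vm_pageout_scan charges that count against
 * its per-pass maxlaunder budget.
 */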

/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	shadows.
 *
 *	The object and map must be locked.
 */
int
vm_pageout_object_deactivate_pages(map, object, count)
	vm_map_t map;
	vm_object_t object;
	int count;
{
	register vm_page_t p, next;
	int rcount;
	int s;
	int dcount;

	dcount = 0;
	if (count == 0)
		count = 1;

	if (object->shadow) {
		int scount = count;
		if (object->shadow->ref_count > 1)
			scount /= object->shadow->ref_count;
		if (scount)
			dcount += vm_pageout_object_deactivate_pages(map,
			    object->shadow, scount);
	}

	if (object->paging_in_progress)
		return dcount;

	/*
	 * scan the object's entire memory queue
	 */
	rcount = object->resident_page_count;
	p = object->memq.tqh_first;
	while (p && (rcount-- > 0)) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		/*
		 * if a page is active, not wired and is in the process's pmap,
		 * then deactivate the page.
		 */
		if ((p->flags & (PG_ACTIVE|PG_BUSY)) == PG_ACTIVE &&
		    p->wire_count == 0 &&
		    p->hold_count == 0 &&
		    pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
			if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p))) {
				p->act_count -= min(p->act_count, ACT_DECLINE);
				/*
				 * if the page act_count is zero -- then we deactivate
				 */
				if (!p->act_count) {
					vm_page_deactivate(p);
					pmap_page_protect(VM_PAGE_TO_PHYS(p),
					    VM_PROT_NONE);
				/*
				 * else if on the next go-around we will deactivate the page
				 * we need to place the page on the end of the queue to age
				 * the other pages in memory.
				 */
				} else {
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					TAILQ_REMOVE(&object->memq, p, listq);
					TAILQ_INSERT_TAIL(&object->memq, p, listq);
				}
				/*
				 * see if we are done yet
				 */
				if (p->flags & PG_INACTIVE) {
					--count;
					++dcount;
					if (count <= 0 &&
					    cnt.v_inactive_count > cnt.v_inactive_target) {
						vm_page_unlock_queues();
						return dcount;
					}
				}

			} else {
				/*
				 * Move the page to the bottom of the queue.
				 */
				pmap_clear_reference(VM_PAGE_TO_PHYS(p));
				if (p->act_count < ACT_MAX)
					p->act_count += ACT_ADVANCE;

				TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
				TAILQ_REMOVE(&object->memq, p, listq);
				TAILQ_INSERT_TAIL(&object->memq, p, listq);
			}
		}

		vm_page_unlock_queues();
		p = next;
	}
	return dcount;
}

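/*
 * Note: vm_pageout_map_deactivate_pages (below) walks a map's entries,
 * recursing into sub-maps and share maps, and applies its "freeer"
 * callback -- normally vm_pageout_object_deactivate_pages above -- to
 * each backing object until *count pages have been deactivated.
 */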
/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
	vm_map_t map;
	vm_map_entry_t entry;
	int *count;
	int (*freeer)(vm_map_t, vm_object_t, int);
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (*count <= 0)
		return;
	vm_map_reference(map);
	if (!lock_try_read(&map->lock)) {
		vm_map_deallocate(map);
		return;
	}
	if (entry == 0) {
		tmpe = map->header.next;
		while (tmpe != &map->header && *count > 0) {
			vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if (entry->is_sub_map || entry->is_a_map) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		while (tmpe != &tmpm->header && *count > 0) {
			vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		*count -= (*freeer)(map, obj, *count);
	}
	lock_read_done(&map->lock);
	vm_map_deallocate(map);
	return;
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
	vm_page_t m;
	int page_shortage, maxscan, maxlaunder;
	int pages_freed, free, nproc;
	int desired_free;
	vm_page_t next;
	struct proc *p;
	vm_object_t object;
	int s;
	int force_wakeup = 0;

morefree:
	/*
	 * Scan the processes: if a process exceeds its RSS limit or is
	 * swapped out, deactivate some of its pages.
	 */

rescanproc1:
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		vm_offset_t size;
		int overage;
		vm_offset_t limit;

		/*
		 * if this is a system process or if we have already
		 * looked at this process, skip it.
		 */
		if (p->p_flag & (P_SYSTEM|P_WEXIT)) {
			continue;
		}

		/*
		 * if the process is in a non-running type state,
		 * don't touch it.
		 */
		if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
			continue;
		}

		/*
		 * get a limit
		 */
		limit = min(p->p_rlimit[RLIMIT_RSS].rlim_cur,
		    p->p_rlimit[RLIMIT_RSS].rlim_max);

		/*
		 * Let processes that are swapped out really be swapped
		 * out: set the limit to nothing to force a swap-out.
		 */
		if ((p->p_flag & P_INMEM) == 0)
			limit = 0;

		size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
		if (size >= limit) {
			overage = (size - limit) / NBPG;
			vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
			    (vm_map_entry_t) 0, &overage,
			    vm_pageout_object_deactivate_pages);
		}

	}

	if (((cnt.v_free_count + cnt.v_inactive_count) >=
	    (cnt.v_inactive_target + cnt.v_free_target)) &&
	    (cnt.v_free_count >= cnt.v_free_target))
		return force_wakeup;

	pages_freed = 0;
	desired_free = cnt.v_free_target;

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */
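	/*
	 * maxlaunder bounds how many dirty pages we are willing to hand
	 * to the pager in this pass; maxscan bounds how far we walk the
	 * inactive queue.
	 */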
	maxlaunder = (cnt.v_free_target - cnt.v_free_count);
	maxscan = cnt.v_inactive_count;
rescan1:
	m = vm_page_queue_inactive.tqh_first;
	while (m && (maxscan-- > 0) &&
	    (cnt.v_free_count < desired_free)) {
		vm_page_t next;

		next = m->pageq.tqe_next;

		if ((m->flags & PG_INACTIVE) == 0) {
			printf("vm_pageout_scan: page not inactive?\n");
			m = next;
			continue;
		}

		/*
		 * activate held pages
		 */
		if (m->hold_count != 0) {
			vm_page_activate(m);
			m = next;
			continue;
		}

		/*
		 * don't mess with busy pages
		 */
		if (m->flags & PG_BUSY) {
			m = next;
			continue;
		}

		/*
		 * If the page is clean but has been referenced, reactivate
		 * it.  If we are very low on memory, or the page has not
		 * been referenced, free it back to the VM system.
		 */
		if (m->flags & PG_CLEAN) {
			if ((cnt.v_free_count > vm_pageout_free_min)	/* XXX */
			    && pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				vm_page_activate(m);
			} else if (!m->act_count) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
				    VM_PROT_NONE);
				vm_page_free(m);
				++pages_freed;
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			}
		} else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) {
			int written;
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
				vm_page_activate(m);
				m = next;
				continue;
			}
			/*
			 * If a page is dirty, then it is either
			 * being washed (but not yet cleaned)
			 * or it is still in the laundry.  If it is
			 * still in the laundry, then we start the
			 * cleaning operation.
			 */

			if ((written = vm_pageout_clean(m, 0)) != 0) {
				maxlaunder -= written;
			}
			if (!next)
				break;
			/*
			 * if the next page has been re-activated, start scanning again
			 */
			if ((next->flags & PG_INACTIVE) == 0)
				goto rescan1;
		} else if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
		}
		m = next;
	}

	/*
	 * now check malloc area or swap processes out if we are in low
	 * memory conditions
	 */
	if (cnt.v_free_count <= cnt.v_free_min) {
		/*
		 * swap out inactive processes
		 */
		swapout_threads();
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target -
	    (cnt.v_free_count + cnt.v_inactive_count);

	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			if (cnt.v_free_count < cnt.v_free_min) {
				page_shortage = cnt.v_free_min - cnt.v_free_count;
			} else if ((cnt.v_free_count + cnt.v_inactive_count) <
			    (cnt.v_free_min + cnt.v_inactive_target)) {
				page_shortage = 1;
			} else {
				page_shortage = 0;
			}
		}

	}

	maxscan = cnt.v_active_count;
	m = vm_page_queue_active.tqh_first;
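
	/*
	 * Age the active queue: a referenced page gains ACT_ADVANCE (up to
	 * ACT_MAX) and is requeued at the tail; an unreferenced page loses
	 * ACT_DECLINE and is deactivated once its act_count reaches zero.
	 */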
	while (m && maxscan-- && (page_shortage > 0)) {

		next = m->pageq.tqe_next;

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->flags & PG_BUSY) || (m->hold_count != 0)) {
			m = next;
			continue;
		}

		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
			if (m->act_count < ACT_MAX)
				m->act_count += ACT_ADVANCE;
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			TAILQ_REMOVE(&m->object->memq, m, listq);
			TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);

			/*
			 * if the page act_count is zero -- then we deactivate
			 */
			if (!m->act_count) {
				vm_page_deactivate(m);
				--page_shortage;
			/*
			 * else if on the next go-around we will deactivate the page
			 * we need to place the page on the end of the queue to age
			 * the other pages in memory.
			 */
			} else {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				TAILQ_REMOVE(&m->object->memq, m, listq);
				TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
			}
		}
		m = next;
	}

	/*
	 * If we have not freed any pages and we are desperate for memory,
	 * then we keep trying until we get some (any) memory.
	 */

	if (!force_wakeup && (swap_pager_full || !force_wakeup ||
	    (pages_freed == 0 && (cnt.v_free_count < cnt.v_free_min)))) {
		vm_pager_sync();
		force_wakeup = 1;
		goto morefree;
	}
	vm_page_pagesfreed += pages_freed;
	return force_wakeup;
}

/*
 *	vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
	extern int swiopend;
	static int nowakeup;
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 */

vmretry:
	cnt.v_free_min = 12;
	cnt.v_free_reserved = 8;
	if (cnt.v_free_min < 8)
		cnt.v_free_min = 8;
	if (cnt.v_free_min > 32)
		cnt.v_free_min = 32;
	vm_pageout_free_min = 4;
	cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
	cnt.v_inactive_target = cnt.v_free_count / 12;
	cnt.v_free_min += cnt.v_free_reserved;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;


	(void) swap_pager_alloc(0, 0, 0, 0);

	/*
	 * The pageout daemon is never done, so loop
	 * forever.
	 */
	while (TRUE) {
		int force_wakeup;
/*
		cnt.v_free_min = 12 + averunnable.ldavg[0] / 1024;
		cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved;
		cnt.v_inactive_target = cnt.v_free_target*2;
*/

		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);

		vm_pager_sync();
		/*
		 * The force wakeup hack was added to eliminate delays and
		 * potential deadlock.  It was possible for the page daemon
		 * to indefinitely postpone waking up a process that it
		 * might be waiting for memory on.  The putmulti stuff
		 * seems to have aggravated the situation.
		 */
		force_wakeup = vm_pageout_scan();
		vm_pager_sync();
		if (force_wakeup)
			wakeup((caddr_t) &cnt.v_free_count);
		cnt.v_scan++;
		wakeup((caddr_t) kmem_map);
	}
}