/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.37 1995/02/22 09:15:32 davidg Exp $
 */

/*
 * The proverbial page-out daemon.
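 *
 * The daemon sleeps on &vm_pages_needed and is awakened when free memory
 * runs short; each wakeup calls vm_pageout_scan() to free clean inactive
 * pages and to launder dirty ones.  A companion kernel process, vm_daemon()
 * below, handles swap-out requests and enforces per-process RSS limits.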
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

extern vm_map_t kmem_map;
int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pagescanner;		/* Event on which pagescanner sleeps */

int vm_pageout_pages_needed = 0;	/* flag saying that the pageout daemon needs pages */
int vm_page_pagesfreed;

extern int npendingio;
int vm_pageout_proc_limit;
int vm_pageout_req_swapout;
int vm_daemon_needed;
extern int nswiodone;
extern int swap_pager_full;
extern int vm_swap_size;
extern int swap_pager_ready();

#define MAXSCAN 1024		/* maximum number of pages to scan in queues */

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 8
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
int vm_pageout_req_do_stats;

int vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 * vm_pageout_clean:
 *	cleans a vm_page
 */
int
vm_pageout_clean(m, sync)
        register vm_page_t m;
        int sync;
{
        /*
         * Clean the page and remove it from the laundry.
         *
         * We set the busy bit to cause potential page faults on this page to
         * block.
         *
         * And we set pageout-in-progress to keep the object from disappearing
         * during pageout.  This guarantees that the page won't move from the
         * inactive queue.  (However, any other page on the inactive queue may
         * move!)
         */

        register vm_object_t object;
        register vm_pager_t pager;
        int pageout_status[VM_PAGEOUT_PAGE_COUNT];
        vm_page_t ms[VM_PAGEOUT_PAGE_COUNT];
        int pageout_count;
        int anyok = 0;
        int i;
        vm_offset_t offset = m->offset;

        object = m->object;
        if (!object) {
                printf("pager: object missing\n");
                return 0;
        }
        if (!object->pager && (object->flags & OBJ_INTERNAL) == 0) {
                printf("pager: non internal obj without pager\n");
        }
        /*
         * Try to collapse the object before making a pager for it.  We must
         * unlock the page queues first.  We try to defer the creation of a
         * pager until all shadows are not paging.  This allows
         * vm_object_collapse to work better and helps control swap space
         * size.  (J. Dyson 11 Nov 93)
         */

        if (!object->pager &&
            (cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
                return 0;

        if ((!sync && m->bmapped != 0 && m->hold_count != 0) ||
            ((m->busy != 0) || (m->flags & PG_BUSY)))
                return 0;

        if (!sync && object->shadow) {
                vm_object_collapse(object);
        }
        pageout_count = 1;
        ms[0] = m;

        pager = object->pager;
        if (pager) {
                for (i = 1; i < vm_pageout_page_count; i++) {
                        vm_page_t mt;

                        ms[i] = mt = vm_page_lookup(object, offset + i * NBPG);
                        if (mt) {
                                vm_page_test_dirty(mt);
                                /*
                                 * We can cluster ONLY if: the page is NOT
                                 * clean, and is not busy, wired, held, or
                                 * mapped into a buffer; and one of the
                                 * following holds: 1) the page is inactive
                                 * (or a seldom-used active page), or 2) we
                                 * force the issue.
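                                 *
                                 * The cluster must stay contiguous in the
                                 * object's offset space, so the scan stops
                                 * at the first ineligible page instead of
                                 * skipping over it.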
                                 */
                                if ((mt->dirty & mt->valid) != 0
                                    && (((mt->flags & (PG_BUSY | PG_INACTIVE)) == PG_INACTIVE)
                                        || sync == VM_PAGEOUT_FORCE)
                                    && (mt->wire_count == 0)
                                    && (mt->busy == 0)
                                    && (mt->hold_count == 0)
                                    && (mt->bmapped == 0))
                                        pageout_count++;
                                else
                                        break;
                        } else
                                break;
                }
                /*
                 * we allow reads during pageouts...
                 */
                for (i = 0; i < pageout_count; i++) {
                        ms[i]->flags |= PG_BUSY;
                        pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
                }
                object->paging_in_progress += pageout_count;
        } else {

                m->flags |= PG_BUSY;

                pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);

                object->paging_in_progress++;

                pager = vm_pager_allocate(PG_DFLT, (caddr_t) 0,
                    object->size, VM_PROT_ALL, 0);
                if (pager != NULL) {
                        vm_object_setpager(object, pager, 0, FALSE);
                }
        }

        /*
         * If there is no pager for the page, use the default pager.  If
         * there's no place to put the page at the moment, leave it in the
         * laundry and hope that there will be paging space later.
         */

        if ((pager && pager->pg_type == PG_SWAP) ||
            (cnt.v_free_count + cnt.v_cache_count) >= cnt.v_pageout_free_min) {
                if (pageout_count == 1) {
                        pageout_status[0] = pager ?
                            vm_pager_put(pager, m,
                                ((sync || (object == kernel_object)) ? TRUE : FALSE)) :
                            VM_PAGER_FAIL;
                } else {
                        if (!pager) {
                                for (i = 0; i < pageout_count; i++)
                                        pageout_status[i] = VM_PAGER_FAIL;
                        } else {
                                vm_pager_put_pages(pager, ms, pageout_count,
                                    ((sync || (object == kernel_object)) ? TRUE : FALSE),
                                    pageout_status);
                        }
                }
        } else {
                for (i = 0; i < pageout_count; i++)
                        pageout_status[i] = VM_PAGER_FAIL;
        }

        for (i = 0; i < pageout_count; i++) {
                switch (pageout_status[i]) {
                case VM_PAGER_OK:
                        ++anyok;
                        break;
                case VM_PAGER_PEND:
                        ++anyok;
                        break;
                case VM_PAGER_BAD:
                        /*
                         * Page outside of range of object.  Right now we
                         * essentially lose the changes by pretending it
                         * worked.
                         */
                        pmap_clear_modify(VM_PAGE_TO_PHYS(ms[i]));
                        ms[i]->dirty = 0;
                        break;
                case VM_PAGER_ERROR:
                case VM_PAGER_FAIL:
                        /*
                         * If the page couldn't be paged out, reactivate it
                         * so that it doesn't clog the inactive list.  (We
                         * will try paging it out again later.)
                         */
                        if (ms[i]->flags & PG_INACTIVE)
                                vm_page_activate(ms[i]);
                        break;
                case VM_PAGER_AGAIN:
                        break;
                }


                /*
                 * If the operation is still going, leave the page busy to
                 * block all other accesses.  Also, leave the paging in
                 * progress indicator set so that we don't attempt an object
                 * collapse.
                 */
                if (pageout_status[i] != VM_PAGER_PEND) {
                        if ((--object->paging_in_progress == 0) &&
                            (object->flags & OBJ_PIPWNT)) {
                                object->flags &= ~OBJ_PIPWNT;
                                wakeup((caddr_t) object);
                        }
                        if ((ms[i]->flags & (PG_REFERENCED|PG_WANTED)) ||
                            pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
                                pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
                                ms[i]->flags &= ~PG_REFERENCED;
                                if (ms[i]->flags & PG_INACTIVE)
                                        vm_page_activate(ms[i]);
                        }
                        PAGE_WAKEUP(ms[i]);
                }
        }
        return anyok;
}

/*
 * vm_pageout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements; or, if vm_pageout_proc_limit is set, deactivate
 *	all of the pages in the object and its shadows.
 *
 *	The object and map must be locked.
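 *
 *	The shadow chain is walked first: an unshared shadow
 *	(ref_count == 1) is charged half of the remaining count, while
 *	shared shadows are scanned in map_remove_only mode.  The object's
 *	own resident pages are then aged via act_count and deactivated
 *	once it drains to zero.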
 */
int
vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
        vm_map_t map;
        vm_object_t object;
        int count;
        int map_remove_only;
{
        register vm_page_t p, next;
        int rcount;
        int dcount;

        dcount = 0;
        if (count == 0)
                count = 1;

        if (object->pager && (object->pager->pg_type == PG_DEVICE))
                return 0;

        if (object->shadow) {
                if (object->shadow->ref_count == 1)
                        dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
                else
                        vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
        }
        if (object->paging_in_progress || !vm_object_lock_try(object))
                return dcount;

        /*
         * scan the object's entire memory queue
         */
        rcount = object->resident_page_count;
        p = object->memq.tqh_first;
        while (p && (rcount-- > 0)) {
                next = p->listq.tqe_next;
                cnt.v_pdpages++;
                vm_page_lock_queues();
                if (p->wire_count != 0 ||
                    p->hold_count != 0 ||
                    p->bmapped != 0 ||
                    p->busy != 0 ||
                    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
                        p = next;
                        continue;
                }
                /*
                 * If a page is active, not wired, and is in the process's
                 * pmap, then deactivate it.
                 */
                if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
                        if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
                            (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
                                p->act_count -= min(p->act_count, ACT_DECLINE);
                                /*
                                 * if the page act_count is zero -- then we
                                 * deactivate
                                 */
                                if (!p->act_count) {
                                        if (!map_remove_only)
                                                vm_page_deactivate(p);
                                        pmap_page_protect(VM_PAGE_TO_PHYS(p),
                                            VM_PROT_NONE);
                                /*
                                 * Otherwise, if we would deactivate the
                                 * page on the next go-around, place it at
                                 * the end of the queue to age the other
                                 * pages in memory.
                                 */
                                } else {
                                        TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                                        TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                                        TAILQ_REMOVE(&object->memq, p, listq);
                                        TAILQ_INSERT_TAIL(&object->memq, p, listq);
                                }
                                /*
                                 * see if we are done yet
                                 */
                                if (p->flags & PG_INACTIVE) {
                                        --count;
                                        ++dcount;
                                        if (count <= 0 &&
                                            cnt.v_inactive_count > cnt.v_inactive_target) {
                                                vm_page_unlock_queues();
                                                vm_object_unlock(object);
                                                return dcount;
                                        }
                                }
                        } else {
                                /*
                                 * Move the page to the bottom of the queue.
                                 */
                                pmap_clear_reference(VM_PAGE_TO_PHYS(p));
                                p->flags &= ~PG_REFERENCED;
                                if (p->act_count < ACT_MAX)
                                        p->act_count += ACT_ADVANCE;

                                TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
                                TAILQ_REMOVE(&object->memq, p, listq);
                                TAILQ_INSERT_TAIL(&object->memq, p, listq);
                        }
                } else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
                        pmap_page_protect(VM_PAGE_TO_PHYS(p),
                            VM_PROT_NONE);
                }
                vm_page_unlock_queues();
                p = next;
        }
        vm_object_unlock(object);
        return dcount;
}


/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
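 *
 * The map is read-locked while its entries are walked; sub-maps and
 * share maps are descended recursively, and the freeer callback
 * (normally vm_pageout_object_deactivate_pages) is applied to each
 * backing object until *count is used up.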
 */

void
vm_pageout_map_deactivate_pages(map, entry, count, freeer)
        vm_map_t map;
        vm_map_entry_t entry;
        int *count;
        int (*freeer) (vm_map_t, vm_object_t, int);
{
        vm_map_t tmpm;
        vm_map_entry_t tmpe;
        vm_object_t obj;

        if (*count <= 0)
                return;
        vm_map_reference(map);
        if (!lock_try_read(&map->lock)) {
                vm_map_deallocate(map);
                return;
        }
        if (entry == 0) {
                tmpe = map->header.next;
                while (tmpe != &map->header && *count > 0) {
                        vm_pageout_map_deactivate_pages(map, tmpe, count, freeer);
                        tmpe = tmpe->next;
                }
        } else if (entry->is_sub_map || entry->is_a_map) {
                tmpm = entry->object.share_map;
                tmpe = tmpm->header.next;
                while (tmpe != &tmpm->header && *count > 0) {
                        vm_pageout_map_deactivate_pages(tmpm, tmpe, count, freeer);
                        tmpe = tmpe->next;
                }
        } else if ((obj = entry->object.vm_object) != 0) {
                *count -= (*freeer) (map, obj, *count);
        }
        lock_read_done(&map->lock);
        vm_map_deallocate(map);
        return;
}

void
vm_req_vmdaemon()
{
        extern int ticks;
        static int lastrun = 0;

        if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
                wakeup((caddr_t) &vm_daemon_needed);
                lastrun = ticks;
        }
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
int
vm_pageout_scan()
{
        vm_page_t m;
        int page_shortage, maxscan, maxlaunder;
        int pages_freed;
        vm_page_t next;
        struct proc *p, *bigproc;
        vm_offset_t size, bigsize;
        vm_object_t object;
        int force_wakeup = 0;

        /* calculate the total cached size */

        if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
            (cnt.v_inactive_target + cnt.v_free_min)) {
                vm_req_vmdaemon();
        }
        /*
         * now swap processes out if we are in low memory conditions
         */
        if ((cnt.v_free_count <= cnt.v_free_min) &&
            !swap_pager_full && vm_swap_size && vm_pageout_req_swapout == 0) {
                vm_pageout_req_swapout = 1;
                vm_req_vmdaemon();
        }
        pages_freed = 0;

        /*
         * Start scanning the inactive queue for pages we can free.  We keep
         * scanning until we have enough free pages or we have scanned through
         * the entire queue.  If we encounter dirty pages, we start cleaning
         * them.
         */

        maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
            MAXLAUNDER : cnt.v_inactive_target;

rescan1:
        maxscan = min(cnt.v_inactive_count, MAXSCAN);
        m = vm_page_queue_inactive.tqh_first;
        while (m && (maxscan-- > 0) &&
            (cnt.v_cache_count < (cnt.v_cache_min + cnt.v_free_target))) {
                vm_page_t next;

                cnt.v_pdpages++;
                next = m->pageq.tqe_next;

#if defined(VM_DIAGNOSE)
                if ((m->flags & PG_INACTIVE) == 0) {
                        printf("vm_pageout_scan: page not inactive?\n");
                        break;
                }
#endif

                /*
                 * don't mess with busy pages
                 */
                if (m->hold_count || m->busy || (m->flags & PG_BUSY) ||
                    m->bmapped != 0) {
                        TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
                        m = next;
                        continue;
                }
                if (((m->flags & PG_REFERENCED) == 0) &&
                    pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
                        m->flags |= PG_REFERENCED;
                }
                if (m->object->ref_count == 0) {
                        m->flags &= ~PG_REFERENCED;
                        pmap_clear_reference(VM_PAGE_TO_PHYS(m));
                }
                if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
                        m->flags &= ~PG_REFERENCED;
                        pmap_clear_reference(VM_PAGE_TO_PHYS(m));
                        vm_page_activate(m);
                        if (m->act_count < ACT_MAX)
                                m->act_count += ACT_ADVANCE;
                        m = next;
                        continue;
                }
                vm_page_test_dirty(m);

                if ((m->dirty & m->valid) == 0) {
                        if (m->valid == 0) {
                                pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
                                vm_page_free(m);
                        } else {
                                vm_page_cache(m);
                        }
                } else if (maxlaunder > 0) {
                        int written;

                        object = m->object;
                        if ((object->flags & OBJ_DEAD) || !vm_object_lock_try(object)) {
                                m = next;
                                continue;
                        }
                        /*
                         * If a page is dirty, then it is either being washed
                         * (but not yet cleaned) or it is still in the
                         * laundry.  If it is still in the laundry, then we
                         * start the cleaning operation.
                         */
                        written = vm_pageout_clean(m, 0);
                        vm_object_unlock(object);

                        if (!next) {
                                break;
                        }
                        maxlaunder -= written;
                        /*
                         * if the next page has been re-activated, start
                         * scanning again
                         */
                        if ((next->flags & PG_INACTIVE) == 0) {
                                goto rescan1;
                        }
                }
                m = next;
        }

        /*
         * Compute the page shortage.  If we are still very low on memory, be
         * sure that we will move a minimal amount of pages from active to
         * inactive.
         */

        page_shortage = cnt.v_inactive_target -
            (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
        if (page_shortage <= 0) {
                if (pages_freed == 0) {
                        page_shortage = cnt.v_free_min - cnt.v_inactive_count;
                }
        }
        maxscan = min(cnt.v_active_count, MAXSCAN);
        m = vm_page_queue_active.tqh_first;
        while (m && (maxscan-- > 0) && (page_shortage > 0)) {

                cnt.v_pdpages++;
                next = m->pageq.tqe_next;

                /*
                 * Don't deactivate pages that are busy.
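                 * "Busy" here covers pages that are busy, held, or mapped
                 * into a buffer; they are simply requeued at the tail so
                 * the scan can keep making progress.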
                 */
                if ((m->busy != 0) ||
                    (m->flags & PG_BUSY) ||
                    (m->hold_count != 0) ||
                    (m->bmapped != 0)) {
                        TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
                        m = next;
                        continue;
                }
                if (m->object->ref_count && ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
                        pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
                        int s;

                        pmap_clear_reference(VM_PAGE_TO_PHYS(m));
                        m->flags &= ~PG_REFERENCED;
                        if (m->act_count < ACT_MAX) {
                                m->act_count += ACT_ADVANCE;
                        }
                        TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
                        TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
                        s = splhigh();
                        TAILQ_REMOVE(&m->object->memq, m, listq);
                        TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
                        splx(s);
                } else {
                        m->flags &= ~PG_REFERENCED;
                        pmap_clear_reference(VM_PAGE_TO_PHYS(m));
                        m->act_count -= min(m->act_count, ACT_DECLINE);

                        /*
                         * if the page act_count is zero -- then we deactivate
                         */
                        if (!m->act_count && (page_shortage > 0)) {
                                if (m->object->ref_count == 0) {
                                        vm_page_test_dirty(m);
                                        --page_shortage;
                                        if ((m->dirty & m->valid) == 0) {
                                                m->act_count = 0;
                                                vm_page_cache(m);
                                        } else {
                                                vm_page_deactivate(m);
                                        }
                                } else {
                                        vm_page_deactivate(m);
                                        --page_shortage;
                                }
                        } else if (m->act_count) {
                                TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
                                TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
                        }
                }
                m = next;
        }

        /*
         * We try to maintain some *really* free pages; this allows
         * interrupt code to be guaranteed space.
         */
        while (cnt.v_free_count < cnt.v_free_reserved) {
                m = vm_page_queue_cache.tqh_first;
                if (!m)
                        break;
                vm_page_free(m);
        }

        /*
         * Make sure that we have swap space -- if we are low on both memory
         * and swap, then kill the biggest process.
         */
        if ((vm_swap_size == 0 || swap_pager_full) &&
            ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
                bigproc = NULL;
                bigsize = 0;
                for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
                        /*
                         * if this is a system process, skip it
                         */
                        if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
                            ((p->p_pid < 48) && (vm_swap_size != 0))) {
                                continue;
                        }
                        /*
                         * if the process is in a non-running type state,
                         * don't touch it.
                         */
                        if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                                continue;
                        }
                        /*
                         * get the process size
                         */
                        size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
                        /*
                         * If this process is bigger than the biggest one,
                         * remember it.
                         */
                        if (size > bigsize) {
                                bigproc = p;
                                bigsize = size;
                        }
                }
                if (bigproc != NULL) {
                        printf("Process %lu killed by vm_pageout -- out of swap\n", (u_long) bigproc->p_pid);
                        psignal(bigproc, SIGKILL);
                        bigproc->p_estcpu = 0;
                        bigproc->p_nice = PRIO_MIN;
                        resetpriority(bigproc);
                        wakeup((caddr_t) &cnt.v_free_count);
                }
        }
        vm_page_pagesfreed += pages_freed;
        return force_wakeup;
}

/*
 * vm_pageout is the high level pageout daemon.
 */
void
vm_pageout()
{
        (void) spl0();

        /*
         * Initialize some paging parameters.
         */

        if (cnt.v_page_count > 1024)
                cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
        else
                cnt.v_free_min = 4;
        /*
         * free_reserved needs to include enough for the largest swap pager
         * structures plus enough for any pv_entry structs when paging.
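         *
         * As an illustrative example (hypothetical page count, 4KB pages):
         * with cnt.v_page_count = 4096, v_free_min starts at
         * 4 + 3072 / 200 = 19, v_pageout_free_min = 6 + 4096 / 1024 = 10,
         * v_free_reserved = 12, v_free_target = 3 * 19 + 12 = 69, and the
         * final v_free_min is 19 + 12 = 31 pages.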
         */
        cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
        cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
        cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
        cnt.v_free_min += cnt.v_free_reserved;

        if (cnt.v_page_count > 1024) {
                cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
                cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
                cnt.v_inactive_target = 2 * cnt.v_cache_min + 192;
        } else {
                cnt.v_cache_min = 0;
                cnt.v_cache_max = 0;
                cnt.v_inactive_target = cnt.v_free_count / 4;
        }

        /* XXX does not really belong here */
        if (vm_page_max_wired == 0)
                vm_page_max_wired = cnt.v_free_count / 3;

        cnt.v_interrupt_free_min = 2;


        (void) swap_pager_alloc(0, 0, 0, 0);
        /*
         * The pageout daemon is never done, so loop forever.
         */
        while (TRUE) {
                tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
                cnt.v_pdwakeups++;
                vm_pager_sync();
                vm_pageout_scan();
                vm_pager_sync();
                wakeup((caddr_t) &cnt.v_free_count);
                wakeup((caddr_t) kmem_map);
        }
}

void
vm_daemon()
{
        vm_object_t object;
        struct proc *p;

        while (TRUE) {
                tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
                swapout_threads();
                /*
                 * Scan the processes for those exceeding their rlimits, or
                 * that are swapped out -- deactivate pages.
                 */

                for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
                        int overage;
                        quad_t limit;
                        vm_offset_t size;

                        /*
                         * if this is a system process or if we have already
                         * looked at this process, skip it.
                         */
                        if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
                                continue;
                        }
                        /*
                         * if the process is in a non-running type state,
                         * don't touch it.
                         */
                        if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
                                continue;
                        }
                        /*
                         * get a limit
                         */
                        limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
                            p->p_rlimit[RLIMIT_RSS].rlim_max);

                        /*
                         * Let processes that are swapped out really be
                         * swapped out: set the limit to nothing, which
                         * will force a swap-out.
                         */
                        if ((p->p_flag & P_INMEM) == 0)
                                limit = 0;	/* XXX */

                        size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
                        if (limit >= 0 && size >= limit) {
                                overage = (size - limit) / NBPG;
                                vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
                                    (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
                        }
                }
        }

        /*
         * we remove cached objects that have no RSS...
         */
restart:
        vm_object_cache_lock();
        object = vm_object_cached_list.tqh_first;
        while (object) {
                vm_object_cache_unlock();
                /*
                 * if there are no resident pages -- get rid of the object
                 */
                if (object->resident_page_count == 0) {
                        if (object != vm_object_lookup(object->pager))
                                panic("vm_object_cache_trim: I'm sooo confused.");
                        pager_cache(object, FALSE);
                        goto restart;
                }
                object = object->cached_list.tqe_next;
                vm_object_cache_lock();
        }
        vm_object_cache_unlock();
}