/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 *  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 *  All rights reserved.
 * Copyright (c) 1994 David Greenman
 *  All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 *  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
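/*
 * Whole-process swapping.  The vm_daemon kernel process enforces RSS
 * limits by deactivating pages of oversized address spaces and services
 * swapout requests from the page daemon.  The swapper (running in proc0)
 * brings swapped-out processes back into memory, while swapout_procs()
 * pushes sufficiently idle processes out, sending their kernel stack
 * pages to the laundry queue.
 */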
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
        "vmdaemon",
        vm_daemon,
        &vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");

/*
 * swap_idle_threshold1 is the guaranteed time (in seconds) a process is
 * kept in memory: a process is not swapped out until its threads have
 * been sleeping for at least this long.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time (in seconds) that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");

static int vm_pageout_req_swapout;      /* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static int swapped_cnt;
static int swap_inprogress;     /* Pending swap-ins done outside swapper. */
static int last_swapin;

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);
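/*
 * Examine a single page on behalf of vm_swapout_object_deactivate().
 * Pages that are wired, busy, or not mapped by the given pmap are left
 * alone.  An unreferenced inactive page has all of its mappings removed;
 * an unreferenced active page is unmapped and deactivated only when
 * "unmap" indicates that the object is not shadowed more than once.
 */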
static void
vm_swapout_object_deactivate_page(pmap_t pmap, vm_page_t m, bool unmap)
{

        /*
         * Ignore unreclaimable wired pages.  Repeat the check after busying
         * since a busy holder may wire the page.
         */
        if (vm_page_wired(m) || !vm_page_tryxbusy(m))
                return;

        if (vm_page_wired(m) || !pmap_page_exists_quick(pmap, m)) {
                vm_page_xunbusy(m);
                return;
        }
        if (!pmap_is_referenced(m)) {
                if (!vm_page_active(m))
                        (void)vm_page_try_remove_all(m);
                else if (unmap && vm_page_try_remove_all(m))
                        vm_page_deactivate(m);
        }
        vm_page_xunbusy(m);
}

/*
 * vm_swapout_object_deactivate
 *
 *      Deactivate enough pages to satisfy the inactive target
 *      requirements.
 *
 *      The object and map must be locked.
 */
static void
vm_swapout_object_deactivate(pmap_t pmap, vm_object_t first_object,
    long desired)
{
        vm_object_t backing_object, object;
        vm_page_t m;
        bool unmap;

        VM_OBJECT_ASSERT_LOCKED(first_object);
        if ((first_object->flags & OBJ_FICTITIOUS) != 0)
                return;
        for (object = first_object;; object = backing_object) {
                if (pmap_resident_count(pmap) <= desired)
                        goto unlock_return;
                VM_OBJECT_ASSERT_LOCKED(object);
                if ((object->flags & OBJ_UNMANAGED) != 0 ||
                    blockcount_read(&object->paging_in_progress) > 0)
                        goto unlock_return;

                unmap = true;
                if (object->shadow_count > 1)
                        unmap = false;

                /*
                 * Scan the object's entire memory queue.
                 */
                TAILQ_FOREACH(m, &object->memq, listq) {
                        if (pmap_resident_count(pmap) <= desired)
                                goto unlock_return;
                        if (should_yield())
                                goto unlock_return;
                        vm_swapout_object_deactivate_page(pmap, m, unmap);
                }
                if ((backing_object = object->backing_object) == NULL)
                        goto unlock_return;
                VM_OBJECT_RLOCK(backing_object);
                if (object != first_object)
                        VM_OBJECT_RUNLOCK(object);
        }
unlock_return:
        if (object != first_object)
                VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
        vm_map_entry_t tmpe;
        vm_object_t obj, bigobj;
        int nothingwired;

        if (!vm_map_trylock_read(map))
                return;

        bigobj = NULL;
        nothingwired = TRUE;

        /*
         * First, search out the biggest object, and try to free pages from
         * that.
         */
        VM_MAP_ENTRY_FOREACH(tmpe, map) {
                if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                        obj = tmpe->object.vm_object;
                        if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
                                if (obj->shadow_count <= 1 &&
                                    (bigobj == NULL ||
                                    bigobj->resident_page_count <
                                    obj->resident_page_count)) {
                                        if (bigobj != NULL)
                                                VM_OBJECT_RUNLOCK(bigobj);
                                        bigobj = obj;
                                } else
                                        VM_OBJECT_RUNLOCK(obj);
                        }
                }
                if (tmpe->wired_count > 0)
                        nothingwired = FALSE;
        }

        if (bigobj != NULL) {
                vm_swapout_object_deactivate(map->pmap, bigobj, desired);
                VM_OBJECT_RUNLOCK(bigobj);
        }

        /*
         * Next, hunt around for other pages to deactivate.  We actually
         * do this search sort of wrong -- .text first is not the best idea.
         */
        VM_MAP_ENTRY_FOREACH(tmpe, map) {
                if (pmap_resident_count(vm_map_pmap(map)) <= desired)
                        break;
                if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                        obj = tmpe->object.vm_object;
                        if (obj != NULL) {
                                VM_OBJECT_RLOCK(obj);
                                vm_swapout_object_deactivate(map->pmap, obj,
                                    desired);
                                VM_OBJECT_RUNLOCK(obj);
                        }
                }
        }

        /*
         * Remove all mappings if a process is swapped out; this will free
         * page table pages.
         */
        if (desired == 0 && nothingwired) {
                pmap_remove(vm_map_pmap(map), vm_map_min(map),
                    vm_map_max(map));
        }

        vm_map_unlock_read(map);
}

/*
 * Swap out requests
 */
#define VM_SWAP_NORMAL  1
#define VM_SWAP_IDLE    2

void
vm_swapout_run(void)
{

        if (vm_swap_enabled)
                vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
        static long lsec;

        if (!vm_swap_idle_enabled || time_second == lsec)
                return;
        vm_req_vmdaemon(VM_SWAP_IDLE);
        lsec = time_second;
}
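/*
 * Post a swapout request and wake up the vm daemon.  Wakeups are limited
 * to roughly one per second; the "ticks < lastrun" comparison restarts
 * the rate limiter after the ticks counter wraps around.
 */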
static void
vm_req_vmdaemon(int req)
{
        static int lastrun = 0;

        mtx_lock(&vm_daemon_mtx);
        vm_pageout_req_swapout |= req;
        if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
                wakeup(&vm_daemon_needed);
                lastrun = ticks;
        }
        mtx_unlock(&vm_daemon_mtx);
}
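/*
 * The vm daemon: sleep until a swapout request arrives (or, when RACCT is
 * enabled, for at most one second), service the request, and then scan
 * all processes, deactivating pages of those whose resident set size
 * exceeds their RSS limit.
 */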
static void
vm_daemon(void)
{
        struct rlimit rsslim;
        struct proc *p;
        struct thread *td;
        struct vmspace *vm;
        int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
        uint64_t rsize, ravailable;
#endif

        while (TRUE) {
                mtx_lock(&vm_daemon_mtx);
                msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
#ifdef RACCT
                    racct_enable ? hz : 0
#else
                    0
#endif
                );
                swapout_flags = vm_pageout_req_swapout;
                vm_pageout_req_swapout = 0;
                mtx_unlock(&vm_daemon_mtx);
                if (swapout_flags != 0) {
                        /*
                         * Drain the per-CPU page queue batches as a deadlock
                         * avoidance measure.
                         */
                        if ((swapout_flags & VM_SWAP_NORMAL) != 0)
                                vm_page_pqbatch_drain();
                        swapout_procs(swapout_flags);
                }

                /*
                 * Scan the process table and deactivate pages of any
                 * process that exceeds its RSS limit or is swapped out.
                 */
                tryagain = 0;
                attempts = 0;
again:
                attempts++;
                sx_slock(&allproc_lock);
                FOREACH_PROC_IN_SYSTEM(p) {
                        vm_pindex_t limit, size;

                        /*
                         * If this is a system process or if we have already
                         * looked at this process, skip it.
                         */
                        PROC_LOCK(p);
                        if (p->p_state != PRS_NORMAL ||
                            p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        /*
                         * If the process is in a non-running type state,
                         * don't touch it.
                         */
                        breakout = 0;
                        FOREACH_THREAD_IN_PROC(p, td) {
                                thread_lock(td);
                                if (!TD_ON_RUNQ(td) &&
                                    !TD_IS_RUNNING(td) &&
                                    !TD_IS_SLEEPING(td) &&
                                    !TD_IS_SUSPENDED(td)) {
                                        thread_unlock(td);
                                        breakout = 1;
                                        break;
                                }
                                thread_unlock(td);
                        }
                        if (breakout) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        /*
                         * Get a limit on the process's resident set size.
                         */
                        lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
                        limit = OFF_TO_IDX(
                            qmin(rsslim.rlim_cur, rsslim.rlim_max));

                        /*
                         * Let processes that are swapped out really be
                         * swapped out: set the limit to nothing, which
                         * will force a swap-out.
                         */
                        if ((p->p_flag & P_INMEM) == 0)
                                limit = 0;      /* XXX */
                        vm = vmspace_acquire_ref(p);
                        _PHOLD_LITE(p);
                        PROC_UNLOCK(p);
                        if (vm == NULL) {
                                PRELE(p);
                                continue;
                        }
                        sx_sunlock(&allproc_lock);

                        size = vmspace_resident_count(vm);
                        if (size >= limit) {
                                vm_swapout_map_deactivate_pages(
                                    &vm->vm_map, limit);
                                size = vmspace_resident_count(vm);
                        }
#ifdef RACCT
                        if (racct_enable) {
                                rsize = IDX_TO_OFF(size);
                                PROC_LOCK(p);
                                if (p->p_state == PRS_NORMAL)
                                        racct_set(p, RACCT_RSS, rsize);
                                ravailable = racct_get_available(p, RACCT_RSS);
                                PROC_UNLOCK(p);
                                if (rsize > ravailable) {
                                        /*
                                         * Don't be overly aggressive; this
                                         * might be an innocent process,
                                         * and the limit could've been exceeded
                                         * by some memory hog.  Don't try
                                         * to deactivate more than 1/4th
                                         * of the process's resident set size.
                                         */
                                        if (attempts <= 8) {
                                                if (ravailable < rsize -
                                                    (rsize / 4)) {
                                                        ravailable = rsize -
                                                            (rsize / 4);
                                                }
                                        }
                                        vm_swapout_map_deactivate_pages(
                                            &vm->vm_map,
                                            OFF_TO_IDX(ravailable));
                                        /* Update RSS usage after paging out. */
                                        size = vmspace_resident_count(vm);
                                        rsize = IDX_TO_OFF(size);
                                        PROC_LOCK(p);
                                        if (p->p_state == PRS_NORMAL)
                                                racct_set(p, RACCT_RSS, rsize);
                                        PROC_UNLOCK(p);
                                        if (rsize > ravailable)
                                                tryagain = 1;
                                }
                        }
#endif
                        vmspace_free(vm);
                        sx_slock(&allproc_lock);
                        PRELE(p);
                }
                sx_sunlock(&allproc_lock);
                if (tryagain != 0 && attempts <= 10) {
                        maybe_yield();
                        goto again;
                }
        }
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_WLOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_dirty(m);
                vm_page_xunbusy_unchecked(m);
                vm_page_unwire(m, PQ_LAUNDRY);
        }
        VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td, int oom_alloc)
{
        vm_object_t ksobj;
        vm_page_t ma[KSTACK_MAX_PAGES];
        int a, count, i, j, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_WLOCK(ksobj);
        (void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
            pages);
        VM_OBJECT_WUNLOCK(ksobj);
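        /*
         * All stack pages are now grabbed wired and busy; those that are
         * still valid never left memory and need no I/O.  Read runs of
         * invalid pages back from the pager in clusters, bounded by the
         * next valid page and by how many pages the pager has available.
         */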
        for (i = 0; i < pages;) {
                vm_page_assert_xbusied(ma[i]);
                if (vm_page_all_valid(ma[i])) {
                        i++;
                        continue;
                }
                vm_object_pip_add(ksobj, 1);
                for (j = i + 1; j < pages; j++)
                        if (vm_page_all_valid(ma[j]))
                                break;
                VM_OBJECT_WLOCK(ksobj);
                rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
                VM_OBJECT_WUNLOCK(ksobj);
                KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
                count = min(a + 1, j - i);
                rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
                KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
                    __func__, td->td_proc->p_pid));
                vm_object_pip_wakeup(ksobj);
                i += count;
        }
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

void
faultin(struct proc *p)
{
        struct thread *td;
        int oom_alloc;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
                return;
        }

        if ((p->p_flag & P_INMEM) == 0) {
                oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
                    VM_ALLOC_NORMAL;

                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                p->p_flag |= P_SWAPPINGIN;
                PROC_UNLOCK(p);
                sx_xlock(&allproc_lock);
                MPASS(swapped_cnt > 0);
                swapped_cnt--;
                if (curthread != &thread0)
                        swap_inprogress++;
                sx_xunlock(&allproc_lock);

                /*
                 * We hold no lock here because the list of threads
                 * cannot change while all threads in the process are
                 * swapped out.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td, oom_alloc);

                if (curthread != &thread0) {
                        sx_xlock(&allproc_lock);
                        MPASS(swap_inprogress > 0);
                        swap_inprogress--;
                        last_swapin = ticks;
                        sx_xunlock(&allproc_lock);
                }
                PROC_LOCK(p);
                swapclear(p);
                p->p_swtick = ticks;

                /* Allow other threads to swap p out now. */
                wakeup(&p->p_flag);
                --p->p_lock;
        }
}

/*
 * This swapin algorithm attempts to swap-in processes only if there is
 * enough space for them.  Of course, if a process waits for a long time,
 * it will be swapped in anyway.
 */
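/*
 * Pick the next process to swap in.  A WKILLED process is always chosen
 * first, since the only way to release its memory is to let it exit, and
 * for that it must be brought back in.  Otherwise, unless wkilled_only is
 * set, pick the swapped-out process with the highest "priority": the sum
 * of its swapped-out time and thread sleep time, adjusted by the nice
 * value unless a swap-in was explicitly requested for the thread.
 */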
static struct proc *
swapper_selector(bool wkilled_only)
{
        struct proc *p, *res;
        struct thread *td;
        int ppri, pri, slptime, swtime;

        sx_assert(&allproc_lock, SA_SLOCKED);
        if (swapped_cnt == 0)
                return (NULL);
        res = NULL;
        ppri = INT_MIN;
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
                    P_SWAPPINGIN | P_INMEM)) != 0) {
                        PROC_UNLOCK(p);
                        continue;
                }
                if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
                        /*
                         * A swapped-out process might have mapped a
                         * large portion of the system's pages as
                         * anonymous memory.  There is no way to
                         * release the memory other than to kill the
                         * process, for which we need to swap it in.
                         */
                        return (p);
                }
                if (wkilled_only) {
                        PROC_UNLOCK(p);
                        continue;
                }
                swtime = (ticks - p->p_swtick) / hz;
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        thread_lock(td);
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
                                /*
                                 * If this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        res = p;
                                        ppri = pri;
                                }
                        }
                        thread_unlock(td);
                }
                PROC_UNLOCK(p);
        }

        if (res != NULL)
                PROC_LOCK(res);
        return (res);
}

#define SWAPIN_INTERVAL (MAXSLP * hz / 2)

/*
 * Limit swapper to swap in one non-WKILLED process in MAXSLP/2
 * interval, assuming that there is:
 * - at least one domain that is not suffering from a shortage of free memory;
 * - no parallel swap-ins;
 * - no other swap-ins in the current SWAPIN_INTERVAL.
 */
static bool
swapper_wkilled_only(void)
{

        return (vm_page_count_min_set(&all_domains) || swap_inprogress > 0 ||
            (u_int)(ticks - last_swapin) < SWAPIN_INTERVAL);
}

void
swapper(void)
{
        struct proc *p;

        for (;;) {
                sx_slock(&allproc_lock);
                p = swapper_selector(swapper_wkilled_only());
                sx_sunlock(&allproc_lock);

                if (p == NULL) {
                        tsleep(&proc0, PVM, "swapin", SWAPIN_INTERVAL);
                } else {
                        PROC_LOCK_ASSERT(p, MA_OWNED);

                        /*
                         * Another process may be bringing or may have
                         * already brought this process in while we
                         * traverse all threads.  Or, this process may
                         * have exited, or may even be being swapped
                         * out again.
                         */
                        if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
                            P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
                                faultin(p);
                        }
                        PROC_UNLOCK(p);
                }
        }
}

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
static void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        int slptime;
        bool didswap, doswap, swapped_out;

        MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

        didswap = false;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                /*
                 * Filter out not yet fully constructed processes.  Do
                 * not swap out held processes.  Avoid processes which
                 * are system, exiting, execing, traced, already swapped
                 * out or are in the process of being swapped in or out.
                 */
                PROC_LOCK(p);
                if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
                    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
                    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
                    P_INMEM) {
                        PROC_UNLOCK(p);
                        continue;
                }

                /*
                 * Further consideration of this process for swap out
                 * requires iterating over its threads.  We release
                 * allproc_lock here so that process creation and
                 * destruction are not blocked while we iterate.
                 *
                 * To later reacquire allproc_lock and resume
                 * iteration over the allproc list, we will first have
                 * to release the lock on the process.  We place a
                 * hold on the process so that it remains in the
                 * allproc list while it is unlocked.
                 */
                _PHOLD_LITE(p);
                sx_sunlock(&allproc_lock);

                /*
                 * Do not swapout a realtime process.
                 * Guarantee swap_idle_threshold1 time in memory.
                 * If the system is under memory stress, or if we are
                 * swapping idle processes >= swap_idle_threshold2,
                 * then swap the process out.
                 */
                doswap = true;
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        slptime = (ticks - td->td_slptick) / hz;
                        if (PRI_IS_REALTIME(td->td_pri_class) ||
                            slptime < swap_idle_threshold1 ||
                            !thread_safetoswapout(td) ||
                            ((action & VM_SWAP_NORMAL) == 0 &&
                            slptime < swap_idle_threshold2))
                                doswap = false;
                        thread_unlock(td);
                        if (!doswap)
                                break;
                }
                swapped_out = doswap && swapout(p) == 0;
                if (swapped_out)
                        didswap = true;

                PROC_UNLOCK(p);
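                /*
                 * swapped_cnt is protected by allproc_lock; take the
                 * lock exclusively just long enough to update the
                 * count, then downgrade back to shared to continue
                 * the scan.
                 */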
                if (swapped_out) {
                        sx_xlock(&allproc_lock);
                        swapped_cnt++;
                        sx_downgrade(&allproc_lock);
                } else
                        sx_slock(&allproc_lock);
                PRELE(p);
        }
        sx_sunlock(&allproc_lock);

        /*
         * If we swapped something out and another process needed memory,
         * then wake up the swapper.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                TD_CLR_SWAPPED(td);
                if (TD_CAN_RUN(td)) {
                        if (setrunnable(td, 0)) {
#ifdef INVARIANTS
                                /*
                                 * XXX: We just cleared TDI_SWAPPED
                                 * above and set TDF_INMEM, so this
                                 * should never happen.
                                 */
                                panic("not waking up swapper");
#endif
                        }
                } else
                        thread_unlock(td);
        }
        p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
            P_INMEM, ("swapout: lost a swapout race?"));

        /*
         * Remember the resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                if (!thread_safetoswapout(td)) {
                        thread_unlock(td);
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
                TD_SET_SWAPPED(td);
                thread_unlock(td);
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;
        PROC_UNLOCK(p);

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        p->p_flag &= ~P_SWAPPINGOUT;
        p->p_swtick = ticks;
        return (0);
}