/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));
        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
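 *
 * A minimal illustrative sketch (uaddr, kbuf and len here are hypothetical
 * names, not part of this file's interface): a caller validating a user
 * buffer before copying from it might do
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 *
 * and must still check the copyin() error, since the map check above does
 * not guarantee that the pages are resident or readable at access time.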
 */
int
useracc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        return (rv == TRUE);
}

/*
 * MPSAFE
 */
void
vslock(addr, len)
        void *addr;
        u_int len;
{

        vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
        void *addr;
        u_int len;
{

        vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        u_int i;

        /*
         * Allocate object for the upage.
         */
        upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
        p->p_upages_obj = upobj;

        /*
         * Get a kernel virtual address for the U area for this process.
         */
        up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
        if (up == 0)
                panic("vm_proc_new: upage allocation failed");
        p->p_uarea = (struct user *)up;

        for (i = 0; i < UAREA_PAGES; i++) {
                /*
                 * Get a uarea page.
                 */
                m = vm_page_grab(upobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;

                vm_page_lock_queues();
                vm_page_wakeup(m);
                vm_page_flag_clear(m, PG_ZERO);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }

        /*
         * Enter the pages into the kernel address space.
         */
        pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_dispose: incorrect number of pages in upobj");
        vm_page_lock_queues();
        while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
        kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
        vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
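 * The pages are dirtied, unwired and unmapped from the kernel address
 * space, but are left in the U area object, so the pageout daemon may
 * page them out and vm_proc_swapin() can later bring them back.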
 */
static void
vm_proc_swapout(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapout: incorrect number of pages in upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        int rv;
        int i;

        upobj = p->p_upages_obj;
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(upobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_proc_swapin: cannot get upage");
                }
                ma[i] = m;
        }
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapin: lost pages from upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_wire(m);
                vm_page_wakeup(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(int devidx)
{
        struct proc *p;
        vm_object_t object;
        vm_page_t m;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                object = p->p_upages_obj;
                if (object != NULL) {
                        VM_OBJECT_LOCK(object);
                        if (swap_pager_isswapped(object, devidx)) {
                                VM_OBJECT_UNLOCK(object);
                                sx_sunlock(&allproc_lock);
                                faultin(p);
                                PROC_UNLOCK(p);
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                TAILQ_FOREACH(m, &object->memq, listq)
                                        vm_page_dirty(m);
                                vm_page_unlock_queues();
                                swap_pager_freespace(object, 0,
                                    object->un_pager.swp.swp_bcount);
                                VM_OBJECT_UNLOCK(object);
                                goto retry;
                        }
                        VM_OBJECT_UNLOCK(object);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);
}
#endif

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
        struct thread *td;
        struct proc *p2;
        struct thread *td2;
        int flags;
{
        struct proc *p1 = td->td_proc;
        struct user *up;

        GIANT_REQUIRED;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared, essentially
                 * this changes shared memory amongst threads into
                 * COW locally.
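                 *
                 * vmspace_unshare() below gives this process a private
                 * copy of the address space when the old vmspace is
                 * still shared (vm_refcnt > 1).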
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return;
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                p1->p_vmspace->vm_refcnt++;
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);

                pmap_pinit2(vmspace_pmap(p2->p_vmspace));

                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /* XXXKSE this is unsatisfactory but should be adequate */
        up = p2->p_uarea;

        /*
         * p_stats currently points at fields in the user struct
         * but not at &u, instead at p_addr.  Copy parts of
         * p_stats; zero the rest of p_stats (statistics).
         *
         * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't
         * need to share sigacts, so we use the up->u_sigacts.
         */
        p2->p_stats = &up->u_stats;
        if (p2->p_sigacts == NULL) {
                if (p2->p_procsig->ps_refcnt != 1)
                        printf("PID:%d NULL sigacts with refcnt not 1!\n",
                            p2->p_pid);
                p2->p_sigacts = &up->u_sigacts;
                up->u_sigacts = *p1->p_sigacts;
        }

        bzero(&up->u_stats.pstat_startzero,
            (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
            (caddr_t) &up->u_stats.pstat_startzero));
        bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
            ((caddr_t) &up->u_stats.pstat_endcopy -
            (caddr_t) &up->u_stats.pstat_startcopy));

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
        struct proc *p;
{

        GIANT_REQUIRED;
        cpu_wait(p);
        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
        void *udata;
{
        struct proc *p = udata;
        int rss_limit;

        /*
         * Set up the initial limits on process VM.  Set the maximum
         * resident set size to be half of (reasonably) available memory.
         * Since this is a soft limit, it comes into effect only when the
         * system is out of memory - half of main memory helps to favor
         * smaller processes, and reduces thrashing of the object cache.
         */
        p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB (512 pages with 4KB pages) */
        rss_limit = max(cnt.v_free_count, 512);
        p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
        struct proc *p;
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        GIANT_REQUIRED;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
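         * Whoever is doing the swapin sets PS_INMEM again and then does a
         * wakeup() on &p->p_sflag (see below), which ends the msleep().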
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                vm_proc_swapin(p);
                FOREACH_THREAD_IN_PROC(p, td)
                        pmap_swapin_thread(td);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                        TD_CLR_SWAPPED(td);
                        if (TD_CAN_RUN(td))
                                setrunnable(td);
                }
                mtx_unlock_spin(&sched_lock);

                wakeup(&p->p_sflag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
        void *dummy;
{
        struct proc *p;
        struct thread *td;
        int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        /* GIANT_REQUIRED */

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct ksegrp *kg;

                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                        continue;
                }
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                kg = td->td_ksegrp;
                                pri = p->p_swtime + kg->kg_slptime;
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= kg->kg_nice * 8;
                                }

                                /*
                                 * if this ksegrp is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in.  (only if there is space).
         * [What checks the space?]
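         * The free-page check is the vm_page_count_min()/VM_WAIT loop at
         * the top of this function; no further check is made at this point.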
         */
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
        int action;
{
        struct proc *p;
        struct thread *td;
        struct ksegrp *kg;
        struct proc *outp, *outp2;
        int outpri, outpri2;
        int didswap = 0;

        GIANT_REQUIRED;

        outp = outp2 = NULL;
        outpri = outpri2 = INT_MIN;
retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;

                /*
                 * Watch out for a process in creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
                mtx_unlock_spin(&sched_lock);

                /*
                 * An aio daemon switches its address space while running.
                 * Perform a quick check whether a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;

                /*
                 * Do not swapout a process that is waiting for VM data
                 * structures as there is a possible deadlock.  Test this
                 * first as this may block.
                 *
                 * Lock the map until swapout finishes, or a thread of
                 * this process may attempt to alter the map.
                 */
                PROC_LOCK(p);
                vm = p->p_vmspace;
                KASSERT(vm != NULL,
                    ("swapout_procs: a process has no address space"));
                ++vm->vm_refcnt;
                PROC_UNLOCK(p);
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE | P_TRACED | P_SYSTEM |
                    P_WEXIT)) != 0) {
                        goto nextproc2;
                }
                /*
                 * only aiod changes vmspace, however it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM
                 */
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) !=
                    PS_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        mtx_lock_spin(&sched_lock);
                        /*
                         * Do not swap out a realtime process.
                         * Check all the thread groups..
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                        goto nextproc;

                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
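                                 * If any ksegrp has slept for less than
                                 * swap_idle_threshold1, the whole process
                                 * is kept resident.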
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                        goto nextproc;

                                /*
                                 * Do not swapout a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if ((td->td_priority) < PSOCK ||
                                            !thread_safetoswapout(td))
                                                goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                        goto nextproc;

                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                        }

                        /*
                         * If the process has been asleep for awhile and had
                         * most of its pages taken away already, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                            ((action & VM_SWAP_IDLE) &&
                            (minslptime > swap_idle_threshold2))) {
                                swapout(p);
                                didswap++;
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        mtx_unlock_spin(&sched_lock);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wake up the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapout(p)
        struct proc *p;
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) ==
            PS_INMEM, ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                    ("swapout: there is a thread not safe for swapout"));
        }
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        PROC_UNLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td)
                TD_SET_SWAPPED(td);
        mtx_unlock_spin(&sched_lock);

        vm_proc_swapout(p);
        FOREACH_THREAD_IN_PROC(p, td)
                pmap_swapout_thread(td);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
        p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */