/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
#endif

static volatile int proc0_rescan;

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

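#if 0
	/*
	 * Illustrative sketch only (not compiled in): a hypothetical caller
	 * that wants to peek at a kernel virtual range would typically check
	 * it with kernacc() first and bail out instead of faulting.  The
	 * names "p", "buf" and "len" are placeholders, not part of this file.
	 */
	if (!kernacc(p, len, VM_PROT_READ))
		return (EFAULT);
	bcopy(p, buf, len);
#endif
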
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

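#if 0
	/*
	 * Illustrative sketch only (not compiled in): the pattern used by the
	 * sysctl code (see the XXX note in vslock() above) is to wire the user
	 * buffer for the duration of a copyout() and unwire it afterwards.
	 * "uaddr", "kbuf" and "len" are placeholders, not part of this file.
	 */
	error = vslock(uaddr, len);
	if (error == 0) {
		error = copyout(kbuf, uaddr, len);
		vsunlock(uaddr, len);
	}
#endif
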
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (m->valid == 0 || rv != VM_PAGER_OK) {
			vm_page_lock_queues();
			vm_page_free(m);
			vm_page_unlock_queues();
			m = NULL;
			goto out;
		}
	}
	vm_page_lock_queues();
	vm_page_hold(m);
	vm_page_unlock_queues();
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock_queues();
	vm_page_unhold(m);
	vm_page_unlock_queues();
}

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

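/*
 * Worked example of the kstack layout set up by vm_thread_new() and torn
 * down by vm_thread_dispose(), assuming 4KB pages, KSTACK_GUARD_PAGES = 1
 * and pages = 4 (illustrative values; the real ones are machine-dependent):
 *
 *   kmem_alloc_nofault() returns a 5-page KVA range.  The first page is
 *   left unmapped by pmap_qremove() as a guard region, so a stack overflow
 *   (stacks grow down) faults instead of silently corrupting adjacent
 *   memory.  td_kstack points one guard page past the start of the range,
 *   and the 4 wired pages backing the stack are entered there with
 *   pmap_qenter().  kmem_free() in vm_thread_dispose() therefore backs up
 *   by KSTACK_GUARD_PAGES * PAGE_SIZE to release the whole range.
 */
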
/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		mtx_lock_spin(&sched_lock);
		proc0_rescan = 0;
		mtx_unlock_spin(&sched_lock);
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
#ifdef KSE
		struct ksegrp *kg;
#endif
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
#ifdef KSE
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
#else
				pri = p->p_swtime + td->td_slptime;
#endif
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= p->p_nice * 8;
				}

				/*
				 * if this ksegrp/thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		mtx_lock_spin(&sched_lock);
		if (!proc0_rescan) {
			TD_SET_IWAIT(&thread0);
			mi_switch(SW_VOL, NULL);
		}
		proc0_rescan = 0;
		mtx_unlock_spin(&sched_lock);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sched_lock);
		proc0_rescan = 0;
		mtx_unlock_spin(&sched_lock);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in.  (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	proc0_rescan = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

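/*
 * Wake up the swapper loop in scheduler() above.  If thread0 is parked
 * waiting (TD_AWAITING_INTR), put it back on a run queue; otherwise flag
 * proc0_rescan so that the loop makes another pass before it next sleeps.
 */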
void
kick_proc0(void)
{
	struct thread *td = &thread0;

	if (TD_AWAITING_INTR(td)) {
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	} else {
		proc0_rescan = 1;
		CTR2(KTR_INTR, "%s: state %d",
		    __func__, td->td_state);
	}
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
#ifdef KSE
	struct ksegrp *kg;
#endif
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
#ifdef KSE
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
#else
			FOREACH_THREAD_IN_PROC(p, td) {
				if (PRI_IS_REALTIME(td->td_pri_class))
#endif
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
#ifdef KSE
				if (kg->kg_slptime < swap_idle_threshold1)
#else
				if (td->td_slptime < swap_idle_threshold1)
#endif
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
#ifdef KSE
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
#else
				if ((td->td_priority) < PSOCK ||
				    !thread_safetoswapout(td))
					goto nextproc;
#endif
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
#ifdef KSE
				    (kg->kg_slptime < swap_idle_threshold2)))
#else
				    (td->td_slptime < swap_idle_threshold2)))
#endif
					goto nextproc;

#ifdef KSE
				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
#else
				if (minslptime > td->td_slptime)
					minslptime = td->td_slptime;
#endif
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */