/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
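/*
 * Illustrative sketch (not part of the original source): a caller that
 * wants to validate a kernel buffer before touching it might write
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * Per the warning above, success only proves that suitable map entries
 * exist, not that every byte is currently accessible.
 */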
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}
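/*
 * Illustrative sketch (not from this file; "src" and "n" are
 * hypothetical): image activators pair vm_imgact_map_page() with
 * vm_imgact_unmap_page() below to copy into an object page through the
 * transient mapping, e.g.:
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy(src, (char *)sf_buf_kva(sf), n);
 *	vm_imgact_unmap_page(sf);
 */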
/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
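		/*
		 * The page is allocated wired and is never busied
		 * (VM_ALLOC_NOBUSY); marking it fully valid below means
		 * no pager input is ever needed for it, and pmap_qenter()
		 * then maps the whole stack range in one go.
		 */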
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}
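/*
 * Note that vm_thread_swapout() dirties each stack page before unwiring
 * it, so the pageout daemon writes the stack contents to swap instead of
 * discarding them; vm_thread_swapin() below relies on the object's pager
 * to bring any paged-out stack pages back in.
 */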
/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int i, j, k, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++)
		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
		    VM_ALLOC_WIRED);
	for (i = 0; i < pages; i++) {
		if (ma[i]->valid != VM_PAGE_BITS_ALL) {
			vm_page_assert_xbusied(ma[i]);
			vm_object_pip_add(ksobj, 1);
			for (j = i + 1; j < pages; j++) {
				if (ma[j]->valid != VM_PAGE_BITS_ALL)
					vm_page_assert_xbusied(ma[j]);
				if (ma[j]->valid == VM_PAGE_BITS_ALL)
					break;
			}
			rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			vm_object_pip_wakeup(ksobj);
			for (k = i; k < j; k++)
				ma[k] = vm_page_lookup(ksobj, k);
			vm_page_xunbusy(ma[i]);
		} else if (vm_page_xbusied(ma[i]))
			vm_page_xunbusy(ma[i]);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(td, p2, td2, vm2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	struct vmspace *vm2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory: if it is shared, this essentially
		 * turns memory shared amongst threads into COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}
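/*
 * Bring a swapped-out process back into memory.  If another thread is
 * already swapping the process in, just wait for it to finish.  The
 * process lock must be held on entry and is still held on return.
 */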
void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
void
swapper(void)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW ||
		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be swapped out again by now.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc;
		}
		/*
		 * Only aiod changes its vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) !=
		    P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swapout a realtime process.
			 * Check all the thread groups.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}
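			/*
			 * At this point minslptime is the shortest time
			 * that any thread in this process has been
			 * asleep.
			 */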
			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
	    ("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */