/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
 * be used in conjunction with this call.
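 *
 * An illustrative (hypothetical) caller pattern, sketched here rather
 * than taken from this file: a driver validating a user buffer before
 * touching it might do something like
 *
 *	if (!useracc(uap->buf, uap->len, VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * and must still be prepared for a later copyout() to fail, since the
 * check covers only the map entries, not the underlying pages.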
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
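 *
 * An illustrative (hypothetical) pairing, not taken from this file: an
 * image activator reading a header through a transient mapping would
 * bracket the access as
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), hdr, sizeof(*hdr));
 *	vm_imgact_unmap_page(sf);
 *
 * Because of sched_pin()/SFB_CPUPRIVATE, the mapping is valid only on
 * the current CPU and must not be handed to another thread.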
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork for a process
 * and of creation for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	ks = kmem_alloc_nofault_space(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
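		 *
		 * VM_ALLOC_RETRY makes vm_page_grab() sleep until a page
		 * is available rather than fail, VM_ALLOC_WIRED keeps the
		 * page from being paged out while it backs a live stack,
		 * and VM_ALLOC_NOBUSY returns the page unbusied so no
		 * vm_page_wakeup() is needed afterwards.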
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_unlock(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
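 * Pages that are still resident and valid are reused as-is; each run of
 * invalid pages is clustered into a single vm_pager_get_pages() request
 * so that the swap pager can satisfy it with one I/O operation.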
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int i, j, k, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++)
		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
		    VM_ALLOC_WIRED);
	for (i = 0; i < pages; i++) {
		if (ma[i]->valid != VM_PAGE_BITS_ALL) {
			KASSERT(ma[i]->oflags & VPO_BUSY,
			    ("lost busy 1"));
			vm_object_pip_add(ksobj, 1);
			for (j = i + 1; j < pages; j++) {
				KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
				    (ma[j]->oflags & VPO_BUSY),
				    ("lost busy 2"));
				if (ma[j]->valid == VM_PAGE_BITS_ALL)
					break;
			}
			rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			vm_object_pip_wakeup(ksobj);
			for (k = i; k < j; k++)
				ma[k] = vm_page_lookup(ksobj, k);
			vm_page_wakeup(ma[i]);
		} else if (ma[i]->oflags & VPO_BUSY)
			vm_page_wakeup(ma[i]);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(td, p2, td2, vm2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	struct vmspace *vm2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this turns memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);	/* and clean-out the vmspace */
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
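	 * The thread performing the swapin clears P_SWAPPINGIN via
	 * swapclear() and then issues a wakeup() on &p->p_flag, which
	 * is the channel the msleep() below waits on.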
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW ||
		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
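 *
 * Both thresholds are exported as read-write sysctls; for example
 * (illustrative only), an administrator could give idle processes
 * 30 seconds before they become swapout candidates with:
 *
 *	sysctl vm.swap_idle_threshold2=30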
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swap out a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swap out a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
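			 *
			 * After a successful swapout, every lock taken
			 * during the scan is dropped and the scan restarts
			 * from the top, since the process list may have
			 * changed in the meantime.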
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
	    ("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */