/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 *
 * XXX UNUSED
 * U areas of free proc structures are no longer freed and are never
 * swapped out.
 * Ideally we would free U areas lazily, when low on memory.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * p_stats currently points at fields in the user struct.
	 * Copy parts of p_stats; zero the rest of p_stats (statistics).
	 */
#define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

	p2->p_stats = &p2->p_uarea->u_stats;
	bzero(&p2->p_stats->pstat_startzero,
	    (unsigned) RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
	    (unsigned) RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));
#undef RANGEOF

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 *
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= p->p_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		atomic_add_int(&vm->vm_refcnt, 1);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
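
#if 0
/*
 * Illustrative sketch only (compiled out): how a kernel consumer such as
 * the sysctl handler is expected to use vslock()/vsunlock() above, wiring
 * a user buffer before copying into it and unwiring it afterwards.  The
 * function name and its "uaddr"/"ulen" parameters are hypothetical and not
 * part of this file; vslock() itself returns 0, EINVAL, ENOMEM, or EFAULT
 * as documented in its definition above.
 */
static int
example_copyout_wired(void *kaddr, void *uaddr, size_t ulen)
{
	int error;

	error = vslock(uaddr, ulen);	/* wire the user pages */
	if (error != 0)
		return (error);
	error = copyout(kaddr, uaddr, ulen);
	vsunlock(uaddr, ulen);		/* unwire, relying on vslock()'s checks */
	return (error);
}
#endif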