/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
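/*
 * Example (an illustrative sketch, not part of the original file): a
 * hypothetical caller might validate a kernel-space buffer before
 * writing to it.  The names "kbuf" and "klen" are invented for the
 * example.
 *
 *	if (!kernacc(kbuf, klen, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *	bzero(kbuf, klen);
 */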
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
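/*
 * Example (sketch): vslock() and vsunlock() are paired around accesses
 * to a user buffer that must stay resident, e.g. before handing the
 * range to code that cannot tolerate page faults.  The names "uaddr",
 * "ulen", and "kbuf" are hypothetical.
 *
 *	error = vslock(uaddr, ulen);
 *	if (error != 0)
 *		return (error);
 *	error = copyin(uaddr, kbuf, ulen);
 *	vsunlock(uaddr, ulen);
 */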
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
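/*
 * Example (sketch): image activators pair these calls to copy data out
 * of a page of the executable through the CPU-private mapping.  The
 * destination "dst" and length "len" (len <= PAGE_SIZE) are
 * hypothetical.
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), dst, len);
 *	vm_imgact_unmap_page(sf);
 */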
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks, kstack_domain_iter;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance for a process and
 * the creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == kstack_pages && kstack_cache != NULL) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = kstack_pages;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	/*
	 * Ensure that kstack objects can draw pages from any memory
	 * domain.  Otherwise a local memory shortage can block a process
	 * swap-in.
	 */
	if (vm_ndomains > 1) {
		ksobj->domain.dr_policy = DOMAINSET_RR();
		ksobj->domain.dr_iter =
		    atomic_fetchadd_int(&kstack_domain_iter, 1);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED, ma, pages);
	for (i = 0; i < pages; i++)
		ma[i]->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
		/*
		 * The freed stack's own memory is reused to hold the
		 * cache entry linkage.
		 */
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    kstack_pages);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init,
    NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if the interrupt is using the kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */
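/*
 * The retry loop in intr_prof_stack_use() is the usual lock-free
 * "store maximum" idiom: reread the current maximum and retry the
 * compare-and-set until either a competing CPU has already published a
 * larger value or our update wins.  A minimal standalone sketch of the
 * same idiom ("record_max" and "recorded_max" are hypothetical names,
 * not part of this file):
 *
 *	static int recorded_max;
 *
 *	static void
 *	record_max(int v)
 *	{
 *		int old;
 *
 *		do {
 *			old = recorded_max;
 *			if (old >= v)
 *				return;
 *		} while (atomic_cmpset_int(&recorded_max, old, v) == 0);
 *	}
 */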
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	struct domainset *dset;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}
	dset = td2->td_domain.dr_policy;
	while (vm_page_count_severe_set(&dset->ds_mask)) {
		vm_wait_doms(&dset->ds_mask);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);	/* and clean-out the vmspace */
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}