/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
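
/*
 * This file supplies the glue between the VM system and the rest of the
 * kernel: kernel and user address range validation (kernacc() and
 * useracc()), transient wiring of user memory (vslock()/vsunlock()),
 * per-CPU page mappings for image activators (vm_imgact_map_page() and
 * vm_imgact_unmap_page()), allocation, caching and disposal of kernel
 * stacks for threads, and the VM side of fork() and process reaping.
 */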

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
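
/*
 * Illustrative usage (added sketch; the caller, buffer and length are
 * hypothetical and not part of this file): validate a kernel address
 * range before touching it.
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *	...
 *
 * Per the warning above, only the vm_map_entry protections are checked;
 * the access itself may still fault.
 */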

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + vm_wire_count() > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}
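
/*
 * Illustrative usage of the vm_imgact_*() pair defined below (added
 * sketch; object, offset, uaddr and len are hypothetical).  This is
 * roughly how image activators copy a file page out to user space:
 *
 *	struct sf_buf *sf;
 *	int error;
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	error = copyout((void *)sf_buf_kva(sf), uaddr, len);
 *	vm_imgact_unmap_page(sf);
 */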

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of process fork and
 * of thread creation.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == kstack_pages && kstack_cache != NULL) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = kstack_pages;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
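	/*
	 * The stack pages are grabbed wired and marked fully valid up
	 * front: the kernel cannot take a page fault on its own stack,
	 * so every page must stay resident and mapped for the lifetime
	 * of the thread.
	 */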
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED, ma, pages);
	for (i = 0; i < pages; i++)
		ma[i]->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_stack_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
		/*
		 * Cache the stack for reuse; the cache entry is stored
		 * in the first words of the now-unused stack memory.
		 */
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    kstack_pages);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
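	/*
	 * Taking the address of a local variable is a cheap way to
	 * approximate the current stack pointer; any address within
	 * this frame is accurate enough for high-water-mark tracking.
	 */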
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if the interrupt is using the kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	/*
	 * Lock-free update of the recorded maximum: retry the CAS until
	 * either the stored value is already >= used or our value wins.
	 */
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	struct domainset *dset;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * If the memory is shared, divorce it: this changes
		 * memory shared amongst the threads into memory that
		 * is copied on write locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}
	dset = td2->td_domain.dr_policy;
	while (vm_page_count_severe_set(&dset->ds_mask)) {
		vm_wait_doms(&dset->ds_mask);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}