/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>

#include <machine/cpu.h>

#if VM_NRESERVLEVEL > 1
#define	KVA_KSTACK_QUANTUM_SHIFT (VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER + \
    PAGE_SHIFT)
#elif VM_NRESERVLEVEL > 0
#define	KVA_KSTACK_QUANTUM_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
#define	KVA_KSTACK_QUANTUM_SHIFT (8 + PAGE_SHIFT)
#endif
#define	KVA_KSTACK_QUANTUM (1ul << KVA_KSTACK_QUANTUM_SHIFT)

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
bool
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (false);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
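
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that wants to verify a kernel buffer is writable before handing it
 * off might do something like the following.  Note that this validates
 * only the vm_map_entry protections, not page residency.
 *
 *	if (!kernacc(buf, len, VM_PROT_WRITE))
 *		return (EFAULT);
 */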

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
bool
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (false);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset);
	(void)vm_page_grab_valid_unlocked(&m, object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}
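
/*
 * Illustrative pairing sketch (not part of the original file; the error
 * handling is hypothetical): an image activator maps a page, accesses it
 * through the sf_buf KVA, and must pair the call with
 * vm_imgact_unmap_page() below to drop the mapping, the CPU pinning, and
 * the page wiring:
 *
 *	struct sf_buf *sf;
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), dst, len);
 *	vm_imgact_unmap_page(sf);
 */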

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_unwire(m, PQ_ACTIVE);
}

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

static vm_object_t kstack_object;
static vm_object_t kstack_alt_object;
static uma_zone_t kstack_cache;
static int kstack_cache_size;
static vmem_t *vmd_kstack_arena[MAXMEMDOM];

static vm_pindex_t vm_kstack_pindex(vm_offset_t ks, int npages);
static vm_object_t vm_thread_kstack_size_to_obj(int npages);
static int vm_thread_stack_back(vm_offset_t kaddr, vm_page_t ma[], int npages,
    int req_class, int domain);

static int
sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
{
	int error, oldsize;

	oldsize = kstack_cache_size;
	error = sysctl_handle_int(oidp, arg1, arg2, req);
	if (error == 0 && req->newptr && oldsize != kstack_cache_size)
		uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
	return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &kstack_cache_size, 0,
    sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks");

/*
 * Allocate a virtual address range from a domain kstack arena, following
 * the specified NUMA policy.
 */
static vm_offset_t
vm_thread_alloc_kstack_kva(vm_size_t size, int domain)
{
#ifndef __ILP32__
	int rv;
	vmem_t *arena;
	vm_offset_t addr = 0;

	size = round_page(size);
	/* Allocate from the kernel arena for non-standard kstack sizes. */
	if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) {
		arena = vm_dom[domain].vmd_kernel_arena;
	} else {
		arena = vmd_kstack_arena[domain];
	}
	rv = vmem_alloc(arena, size, M_BESTFIT | M_NOWAIT, &addr);
	if (rv == ENOMEM)
		return (0);
	KASSERT(atop(addr - VM_MIN_KERNEL_ADDRESS) %
	    (kstack_pages + KSTACK_GUARD_PAGES) == 0,
	    ("%s: allocated kstack KVA not aligned to multiple of kstack size",
	    __func__));

	return (addr);
#else
	return (kva_alloc(size));
#endif
}

/*
 * Release a region of kernel virtual memory
 * allocated from the kstack arena.
 */
static __noinline void
vm_thread_free_kstack_kva(vm_offset_t addr, vm_size_t size, int domain)
{
	vmem_t *arena;

	size = round_page(size);
#ifdef __ILP32__
	arena = kernel_arena;
#else
	arena = vmd_kstack_arena[domain];
	if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) {
		arena = vm_dom[domain].vmd_kernel_arena;
	}
#endif
	vmem_free(arena, addr, size);
}

static vmem_size_t
vm_thread_kstack_import_quantum(void)
{
#ifndef __ILP32__
	/*
	 * The kstack_quantum is larger than KVA_QUANTUM to account
	 * for holes induced by guard pages.
	 */
	return (KVA_KSTACK_QUANTUM * (kstack_pages + KSTACK_GUARD_PAGES));
#else
	return (KVA_KSTACK_QUANTUM);
#endif
}
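
/*
 * Worked example (illustrative values, not from the original file):
 * with amd64-like constants PAGE_SHIFT = 12 and VM_LEVEL_0_ORDER = 9,
 * KVA_KSTACK_QUANTUM_SHIFT = 21 and KVA_KSTACK_QUANTUM = 2MB.  Assuming
 * kstack_pages = 4 and KSTACK_GUARD_PAGES = 1, each import then pulls
 * 2MB * (4 + 1) = 10MB of KVA from the parent arena, which divides
 * evenly into five-page (guard + stack) slots.
 */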

/*
 * Import KVA from a parent arena into the kstack arena.  Imports must be
 * a multiple of kernel stack pages + guard pages in size.
 *
 * Kstack VA allocations need to be aligned so that the linear KVA pindex
 * is divisible by the total number of kstack VA pages.  This is necessary to
 * make vm_kstack_pindex work properly.
 *
 * We import a multiple of KVA_KSTACK_QUANTUM-sized region from the parent
 * arena.  The actual size used by the kstack arena is one kstack smaller to
 * allow for the necessary alignment adjustments to be made.
 */
static int
vm_thread_kstack_arena_import(void *arena, vmem_size_t size, int flags,
    vmem_addr_t *addrp)
{
	int error, rem;
	size_t kpages = kstack_pages + KSTACK_GUARD_PAGES;

	KASSERT(atop(size) % kpages == 0,
	    ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
	    (intmax_t)size, (int)kpages));

	error = vmem_xalloc(arena, vm_thread_kstack_import_quantum(),
	    KVA_KSTACK_QUANTUM, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags,
	    addrp);
	if (error) {
		return (error);
	}

	rem = atop(*addrp - VM_MIN_KERNEL_ADDRESS) % kpages;
	if (rem != 0) {
		/* Bump addr to the next aligned address. */
		*addrp = *addrp + (kpages - rem) * PAGE_SIZE;
	}

	return (0);
}

/*
 * Release KVA from a parent arena into the kstack arena.  Released imports
 * must be a multiple of kernel stack pages + guard pages in size.
 */
static void
vm_thread_kstack_arena_release(void *arena, vmem_addr_t addr, vmem_size_t size)
{
	int rem;
	size_t kpages __diagused = kstack_pages + KSTACK_GUARD_PAGES;

	KASSERT(size % kpages == 0,
	    ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
	    (intmax_t)size, (int)kpages));

	KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % kpages == 0,
	    ("%s: Address %p is not properly aligned (%p)", __func__,
	    (void *)addr, (void *)VM_MIN_KERNEL_ADDRESS));
	/*
	 * If the address is not KVA_KSTACK_QUANTUM-aligned we have to
	 * decrement it to account for the shift in
	 * vm_thread_kstack_arena_import.
	 */
	rem = addr % KVA_KSTACK_QUANTUM;
	if (rem) {
		KASSERT(rem <= ptoa(kpages),
		    ("%s: rem > kpages (%d), (%d)", __func__, rem,
		    (int)kpages));
		addr -= rem;
	}
	vmem_xfree(arena, addr, vm_thread_kstack_import_quantum());
}
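
/*
 * Alignment example (illustrative numbers, not from the original file):
 * with kpages = 5, suppose an import lands at a page offset of
 * atop(addr - VM_MIN_KERNEL_ADDRESS) = 1027.  Then rem = 1027 % 5 = 2,
 * and the start is bumped by (5 - 2) = 3 pages to page offset 1030, a
 * multiple of 5.  The release path undoes this by rounding addr down to
 * the preceding KVA_KSTACK_QUANTUM boundary before freeing the full
 * import quantum.
 */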

/*
 * Create the kernel stack for a new thread.
 */
static vm_offset_t
vm_thread_stack_create(struct domainset *ds, int pages, int flags)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	struct vm_domainset_iter di;
	int req;
	vm_offset_t ks;
	int domain, i;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	req = malloc2vm_flags(flags);
	do {
		/*
		 * Get a kernel virtual address for this thread's kstack.
		 */
		ks = vm_thread_alloc_kstack_kva(ptoa(pages +
		    KSTACK_GUARD_PAGES), domain);
		if (ks == 0)
			continue;
		ks += ptoa(KSTACK_GUARD_PAGES);

		/*
		 * Allocate physical pages to back the stack.
		 */
		if (vm_thread_stack_back(ks, ma, pages, req, domain) != 0) {
			vm_thread_free_kstack_kva(ks - ptoa(KSTACK_GUARD_PAGES),
			    ptoa(pages + KSTACK_GUARD_PAGES), domain);
			continue;
		}
		if (KSTACK_GUARD_PAGES != 0) {
			pmap_qremove(ks - ptoa(KSTACK_GUARD_PAGES),
			    KSTACK_GUARD_PAGES);
		}
		for (i = 0; i < pages; i++)
			vm_page_valid(ma[i]);
		pmap_qenter(ks, ma, pages);
		return (ks);
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (0);
}

static __noinline void
vm_thread_stack_dispose(vm_offset_t ks, int pages)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int i, domain;
	vm_object_t obj = vm_thread_kstack_size_to_obj(pages);

	pindex = vm_kstack_pindex(ks, pages);
	domain = vm_phys_domain(vtophys(ks));
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(obj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(obj, pindex + i);
		if (m == NULL)
			panic("%s: kstack already missing?", __func__);
		KASSERT(vm_page_domain(m) == domain,
		    ("%s: page %p domain mismatch, expected %d got %d",
		    __func__, m, domain, vm_page_domain(m)));
		vm_page_xbusy_claim(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(obj);
	kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
	vm_thread_free_kstack_kva(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    ptoa(pages + KSTACK_GUARD_PAGES), domain);
}

/*
 * Allocate the kernel stack for a new thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_offset_t ks;
	u_short ks_domain;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	ks = 0;
	if (pages == kstack_pages && kstack_cache != NULL)
		ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);
	if (ks == 0)
		ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
		    pages, M_NOWAIT);
	if (ks == 0)
		return (0);

	ks_domain = vm_phys_domain(vtophys(ks));
	KASSERT(ks_domain >= 0 && ks_domain < vm_ndomains,
	    ("%s: invalid domain for kstack %p", __func__, (void *)ks));
	td->td_kstack = ks;
	td->td_kstack_pages = pages;
	td->td_kstack_domain = ks_domain;
	return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_offset_t ks;
	int pages;

	pages = td->td_kstack_pages;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	td->td_kstack_domain = MAXMEMDOM;
	if (pages == kstack_pages) {
		kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
		uma_zfree(kstack_cache, (void *)ks);
	} else {
		vm_thread_stack_dispose(ks, pages);
	}
}

/*
 * Calculate kstack pindex.
 *
 * Uses a non-identity mapping if guard pages are
 * active to avoid pindex holes in the kstack object.
 */
static vm_pindex_t
vm_kstack_pindex(vm_offset_t ks, int kpages)
{
	vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

#ifdef __ILP32__
	return (pindex);
#else
	/*
	 * Return the linear pindex if guard pages aren't active or if we are
	 * allocating a non-standard kstack size.
	 */
	if (KSTACK_GUARD_PAGES == 0 || kpages != kstack_pages) {
		return (pindex);
	}
	KASSERT(pindex % (kpages + KSTACK_GUARD_PAGES) >= KSTACK_GUARD_PAGES,
	    ("%s: Attempting to calculate kstack guard page pindex",
	    __func__));

	return (pindex -
	    (pindex / (kpages + KSTACK_GUARD_PAGES) + 1) * KSTACK_GUARD_PAGES);
#endif
}
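
/*
 * Worked example (illustrative values, not from the original file):
 * with kstack_pages = 4 and KSTACK_GUARD_PAGES = 1, KVA is carved into
 * five-page slots of the form [guard, p0, p1, p2, p3].  The first
 * slot's stack pages sit at linear pindexes 1-4 and map to 0-3; the
 * second slot's pages sit at linear pindexes 6-9 (the guard occupies 5)
 * and map to 6 - (6 / 5 + 1) * 1 = 4 through 7.  The guard pages
 * therefore leave no holes in the kstack object's pindex space.
 */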

/*
 * Allocate physical pages, following the specified NUMA policy, to back a
 * kernel stack.
 */
static int
vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
    int domain)
{
	struct pctrie_iter pages;
	vm_object_t obj = vm_thread_kstack_size_to_obj(npages);
	vm_pindex_t pindex;
	vm_page_t m;
	int n;

	pindex = vm_kstack_pindex(ks, npages);

	vm_page_iter_init(&pages, obj);
	VM_OBJECT_WLOCK(obj);
	for (n = 0; n < npages; ma[n++] = m) {
		m = vm_page_grab_iter(obj, pindex + n,
		    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED, &pages);
		if (m != NULL)
			continue;
		m = vm_page_alloc_domain_iter(obj, pindex + n,
		    domain, req_class | VM_ALLOC_WIRED, &pages);
		if (m != NULL)
			continue;
		for (int i = 0; i < n; i++) {
			m = ma[i];
			(void)vm_page_unwire_noq(m);
			vm_page_free(m);
		}
		break;
	}
	VM_OBJECT_WUNLOCK(obj);
	return (n < npages ? ENOMEM : 0);
}

static vm_object_t
vm_thread_kstack_size_to_obj(int npages)
{
	return (npages == kstack_pages ? kstack_object : kstack_alt_object);
}

static int
kstack_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct domainset *ds;
	int i;

	if (domain == UMA_ANYDOMAIN)
		ds = DOMAINSET_RR();
	else
		ds = DOMAINSET_PREF(domain);

	for (i = 0; i < cnt; i++) {
		store[i] = (void *)vm_thread_stack_create(ds, kstack_pages,
		    flags);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

static void
kstack_release(void *arg, void **store, int cnt)
{
	vm_offset_t ks;
	int i;

	for (i = 0; i < cnt; i++) {
		ks = (vm_offset_t)store[i];
		vm_thread_stack_dispose(ks, kstack_pages);
	}
}
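
/*
 * Continuing the 2MB example above (illustrative values): the arena
 * quantum handed to vmem_set_import() below is the 10MB import quantum
 * minus one five-page kstack slot (20KB with 4KB pages).  That slack is
 * what lets vm_thread_kstack_arena_import() bump an unaligned import
 * start forward by up to one slot without overrunning the imported
 * range.
 */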

static void
kstack_cache_init(void *null)
{
	vm_size_t kstack_quantum;
	int domain;

	kstack_object = vm_object_allocate(OBJT_PHYS,
	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
	kstack_cache = uma_zcache_create("kstack_cache",
	    kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
	    kstack_import, kstack_release, NULL,
	    UMA_ZONE_FIRSTTOUCH);
	kstack_cache_size = imax(128, mp_ncpus * 4);
	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);

	kstack_alt_object = vm_object_allocate(OBJT_PHYS,
	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));

	kstack_quantum = vm_thread_kstack_import_quantum();
	/*
	 * Reduce size used by the kstack arena to allow for
	 * alignment adjustments in vm_thread_kstack_arena_import.
	 */
	kstack_quantum -= (kstack_pages + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	/*
	 * Create the kstack_arena for each domain and set kernel_arena as
	 * parent.
	 */
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd_kstack_arena[domain] = vmem_create("kstack arena", 0, 0,
		    PAGE_SIZE, 0, M_WAITOK);
		KASSERT(vmd_kstack_arena[domain] != NULL,
		    ("%s: failed to create domain %d kstack_arena", __func__,
		    domain));
		vmem_set_import(vmd_kstack_arena[domain],
		    vm_thread_kstack_arena_import,
		    vm_thread_kstack_arena_release,
		    vm_dom[domain].vmd_kernel_arena, kstack_quantum);
	}
}
SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track the maximum stack depth used by a thread in the kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect whether the interrupt is using the kernel thread
	 * stack.  Hardware could use a dedicated stack for interrupt
	 * handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	struct domainset *dset;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared: essentially,
		 * this turns memory shared amongst threads into COW
		 * locally.
		 */
		if ((flags & RFMEM) == 0) {
			error = vmspace_unshare(p1);
			if (error)
				return (error);
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		refcount_acquire(&p1->p_vmspace->vm_refcnt);
	}
	dset = td2->td_domain.dr_policy;
	while (vm_page_count_severe_set(&dset->ds_mask)) {
		vm_wait_doms(&dset->ds_mask, 0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork() will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}
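
/*
 * Caller sketch (illustrative; the locals are hypothetical, though the
 * real call site is fork1() in kern_fork.c): a caller creating a full
 * process copies the address space first and then lets vm_forkproc()
 * wire everything up:
 *
 *	if ((flags & RFMEM) == 0) {
 *		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
 *		if (vm2 == NULL)
 *			return (ENOMEM);
 *	}
 *	...
 *	error = vm_forkproc(td, p2, td2, vm2, flags);
 */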

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while the
 * process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);	/* and clean-out the vmspace */
}