/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.
 * In most cases just checking the vm_map_entry is sufficient within the
 * kernel's address space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
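
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical caller might use useracc() as a fast pre-check before a
 * copy, keeping in mind the warning above that only vm_map_entry
 * protections are verified.  The argument names (uap->argp, a) are
 * invented for the example:
 *
 *	struct foo_args a;
 *
 *	if (!useracc(uap->argp, sizeof(a), VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uap->argp, &a, sizeof(a));	// still required
 *
 * copyin() remains the authoritative check; useracc() cannot replace it,
 * since the page contents may still be unreadable despite the map entry.
 */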

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
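
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the sysctl-style usage that motivates vslock()'s EFAULT convention.  A
 * hypothetical handler wires the user buffer, copies while faults cannot
 * occur, and always unwires on the way out; the copy step shown is a
 * placeholder, not a specific kernel API:
 *
 *	error = vslock(req->oldptr, req->oldlen);
 *	if (error != 0)
 *		return (error);	// already EFAULT, matching copyout()
 *	error = do_copy_to_wired_buffer(req);	// hypothetical copy step
 *	vsunlock(req->oldptr, req->oldlen);
 *	return (error);
 */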

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_activate(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}
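
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an image activator might use the map/unmap pair to copy a hypothetical
 * header out of an executable's backing object.  The sf_buf provides a
 * temporary kernel address for the pinned page; hdr and file_offset are
 * invented for the example:
 *
 *	struct sf_buf *sf;
 *
 *	sf = vm_imgact_map_page(object, file_offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((char *)sf_buf_kva(sf) + (file_offset & PAGE_MASK),
 *	    &hdr, sizeof(hdr));
 *	vm_imgact_unmap_page(sf);
 *
 * Note that sched_pin() is in effect between the two calls, so the code
 * in between must not sleep or migrate CPUs.
 */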
/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == kstack_pages) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = kstack_pages;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
	    PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_BESTFIT | M_NOWAIT, &ks)) {
		ks = 0;
	}
#else
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED, ma, pages);
	for (i = 0; i < pages; i++)
		ma[i]->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
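
/*
 * Illustrative layout sketch (editor's addition, not part of the original
 * file): the guard pages sit below the stack in the allocated KVA range,
 * which is why vm_thread_new() advances ks past them and
 * vm_thread_stack_dispose() subtracts them back off before kva_free():
 *
 *	allocation base                 ks = td->td_kstack
 *	|                               |
 *	v                               v
 *	+-------------------------------+-----------------------------+
 *	| KSTACK_GUARD_PAGES (unmapped) | pages of wired stack memory |
 *	+-------------------------------+-----------------------------+
 *	<--- (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, freed as one --->
 *
 * A fault on the unmapped guard region catches kernel stack overflow,
 * since the stack grows down toward it.
 */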

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == kstack_pages && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    kstack_pages);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
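
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the cache above is an intrusive singly-linked list.  A freed stack's
 * own memory doubles as its list node, so caching costs no extra
 * allocation.  The entry fields shown are inferred from their use here;
 * the authoritative definition lives in <sys/_kstack_cache.h>:
 *
 *	struct kstack_cache_entry {
 *		vm_object_t ksobj;
 *		struct kstack_cache_entry *next_ks_entry;
 *	};
 *
 *	// push: overlay the entry on the dead stack's first bytes
 *	ks_ce = (struct kstack_cache_entry *)ks;
 *	// pop: the entry's address is the stack's base address
 *	td->td_kstack = (vm_offset_t)ks_ce;
 *
 * This is safe because a stack on the free list has no running thread,
 * and its pages stay mapped until the vm_lowmem handler drains the cache.
 */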

#ifdef KSTACK_USAGE_PROF
/*
 * Track the maximum stack depth used by a thread in the kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if the interrupt is using the kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */
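
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the loop at the end of intr_prof_stack_use() is the standard lock-free
 * "atomic maximum" pattern.  Written generically, with invented names:
 *
 *	do {
 *		prev = shared_max;		// snapshot
 *		if (prev >= candidate)
 *			break;			// nothing to do
 *	} while (!atomic_cmpset_int(&shared_max, prev, candidate));
 *
 * If another CPU raises shared_max between the snapshot and the
 * compare-and-set, the cmpset fails and the loop re-reads; it can only
 * terminate with shared_max >= candidate, and never takes a lock.
 */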

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; in effect this
		 * converts memory shared amongst threads into COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
kick_proc0(void)
{

	wakeup(&proc0);
}
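
/*
 * Illustrative summary (editor's addition, not part of the original
 * file) of how vm_forkproc() interprets its rfork(2)-derived flags:
 *
 *	RFPROC	RFMEM	result
 *	------	-----	-----------------------------------------------
 *	clear	clear	no new process; unshare this process's vmspace
 *	clear	set	no new process; keep sharing the vmspace
 *	set	set	child shares p1's vmspace (vm_refcnt bumped)
 *	set	clear	child gets vm2, the vmspace copy from the caller
 *
 * In every case cpu_fork() finishes the machine-dependent setup.
 */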