xref: /freebsd/sys/vm/vm_glue.c (revision 6d42d5dbdd677c3422bdb3867770639f48c6df7a)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Permission to use, copy, modify and distribute this software and
39  * its documentation is hereby granted, provided that both the copyright
40  * notice and this permission notice appear in all copies of the
41  * software, derivative works or modified versions, and any portions
42  * thereof, and that both notices appear in supporting documentation.
43  *
44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47  *
48  * Carnegie Mellon requests users of this software to return to
49  *
50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
51  *  School of Computer Science
52  *  Carnegie Mellon University
53  *  Pittsburgh PA 15213-3890
54  *
55  * any improvements or extensions that they make and grant Carnegie the
56  * rights to redistribute these changes.
57  */
58 
59 #include "opt_vm.h"
60 #include "opt_kstack_pages.h"
61 #include "opt_kstack_max_pages.h"
62 #include "opt_kstack_usage_prof.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/asan.h>
67 #include <sys/domainset.h>
68 #include <sys/limits.h>
69 #include <sys/lock.h>
70 #include <sys/malloc.h>
71 #include <sys/msan.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/racct.h>
75 #include <sys/refcount.h>
76 #include <sys/resourcevar.h>
77 #include <sys/rwlock.h>
78 #include <sys/sched.h>
79 #include <sys/sf_buf.h>
80 #include <sys/shm.h>
81 #include <sys/smp.h>
82 #include <sys/vmmeter.h>
83 #include <sys/vmem.h>
84 #include <sys/sx.h>
85 #include <sys/sysctl.h>
86 #include <sys/kernel.h>
87 #include <sys/ktr.h>
88 #include <sys/unistd.h>
89 
90 #include <vm/uma.h>
91 #include <vm/vm.h>
92 #include <vm/vm_param.h>
93 #include <vm/pmap.h>
94 #include <vm/vm_domainset.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_page.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_pagequeue.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_extern.h>
102 #include <vm/vm_pager.h>
103 #include <vm/vm_phys.h>
104 
105 #include <machine/cpu.h>
106 
107 #if VM_NRESERVLEVEL > 1
108 #define KVA_KSTACK_QUANTUM_SHIFT (VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER + \
109     PAGE_SHIFT)
110 #elif VM_NRESERVLEVEL > 0
111 #define KVA_KSTACK_QUANTUM_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT)
112 #else
113 #define KVA_KSTACK_QUANTUM_SHIFT (8 + PAGE_SHIFT)
114 #endif
115 #define KVA_KSTACK_QUANTUM (1ul << KVA_KSTACK_QUANTUM_SHIFT)
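/*
 * For a sense of scale (illustrative; the values are configuration
 * dependent): on a typical amd64 kernel with one reservation level,
 * VM_LEVEL_0_ORDER == 9 and PAGE_SHIFT == 12, giving
 * KVA_KSTACK_QUANTUM_SHIFT == 21 and a 2 MB quantum, i.e. the superpage
 * reservation size.  Without reservations the fallback shift is
 * 8 + PAGE_SHIFT, or 256 pages per quantum with 4 KB pages.
 */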
116 
117 /*
118  * MPSAFE
119  *
120  * WARNING!  This code calls vm_map_check_protection() which only checks
121  * the associated vm_map_entry range.  It does not determine whether the
122  * contents of the memory are actually readable or writable.  In most cases
123  * just checking the vm_map_entry is sufficient within the kernel's address
124  * space.
125  */
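/*
 * An illustrative caller (not from this file) validating a kernel
 * buffer before touching it might therefore do:
 *
 *	if (!kernacc(ptr, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * keeping in mind that only the map entries, not the backing pages,
 * have been checked.
 */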
126 bool
127 kernacc(void *addr, int len, int rw)
128 {
129 	boolean_t rv;
130 	vm_offset_t saddr, eaddr;
131 	vm_prot_t prot;
132 
133 	KASSERT((rw & ~VM_PROT_ALL) == 0,
134 	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
135 
136 	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
137 	    (vm_offset_t)addr + len < (vm_offset_t)addr)
138 		return (false);
139 
140 	prot = rw;
141 	saddr = trunc_page((vm_offset_t)addr);
142 	eaddr = round_page((vm_offset_t)addr + len);
143 	vm_map_lock_read(kernel_map);
144 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
145 	vm_map_unlock_read(kernel_map);
146 	return (rv == TRUE);
147 }
148 
149 /*
150  * MPSAFE
151  *
152  * WARNING!  This code calls vm_map_check_protection() which only checks
153  * the associated vm_map_entry range.  It does not determine whether the
154  * contents of the memory are actually readable or writable.  vmapbuf(),
155  * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
156  * used in conjunction with this call.
157  */
158 bool
159 useracc(void *addr, int len, int rw)
160 {
161 	boolean_t rv;
162 	vm_prot_t prot;
163 	vm_map_t map;
164 
165 	KASSERT((rw & ~VM_PROT_ALL) == 0,
166 	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
167 	prot = rw;
168 	map = &curproc->p_vmspace->vm_map;
169 	if ((vm_offset_t)addr + len > vm_map_max(map) ||
170 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
171 		return (false);
172 	}
173 	vm_map_lock_read(map);
174 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
175 	    round_page((vm_offset_t)addr + len), prot);
176 	vm_map_unlock_read(map);
177 	return (rv == TRUE);
178 }
179 
180 int
181 vslock(void *addr, size_t len)
182 {
183 	vm_offset_t end, last, start;
184 	vm_size_t npages;
185 	int error;
186 
187 	last = (vm_offset_t)addr + len;
188 	start = trunc_page((vm_offset_t)addr);
189 	end = round_page(last);
190 	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
191 		return (EINVAL);
192 	npages = atop(end - start);
193 	if (npages > vm_page_max_user_wired)
194 		return (ENOMEM);
195 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
196 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
197 	if (error == KERN_SUCCESS) {
198 		curthread->td_vslock_sz += len;
199 		return (0);
200 	}
201 
202 	/*
203 	 * Return EFAULT on error to match copy{in,out}() behaviour
204 	 * rather than returning ENOMEM like mlock() would.
205 	 */
206 	return (EFAULT);
207 }
208 
209 void
210 vsunlock(void *addr, size_t len)
211 {
212 
213 	/* Rely on the parameter sanity checks performed by vslock(). */
214 	MPASS(curthread->td_vslock_sz >= len);
215 	curthread->td_vslock_sz -= len;
216 	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
217 	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
218 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
219 }
220 
221 /*
222  * Pin the page contained within the given object at the given offset.  If the
223  * page is not resident, allocate and load it using the given object's pager.
224  * Return the pinned page if successful; otherwise, return NULL.
225  */
226 static vm_page_t
227 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
228 {
229 	vm_page_t m;
230 	vm_pindex_t pindex;
231 
232 	pindex = OFF_TO_IDX(offset);
233 	(void)vm_page_grab_valid_unlocked(&m, object, pindex,
234 	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
235 	return (m);
236 }
237 
238 /*
239  * Return a CPU private mapping to the page at the given offset within the
240  * given object.  The page is pinned before it is mapped.
241  */
242 struct sf_buf *
243 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
244 {
245 	vm_page_t m;
246 
247 	m = vm_imgact_hold_page(object, offset);
248 	if (m == NULL)
249 		return (NULL);
250 	sched_pin();
251 	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
252 }
253 
254 /*
255  * Destroy the given CPU private mapping and unpin the page that it mapped.
256  */
257 void
258 vm_imgact_unmap_page(struct sf_buf *sf)
259 {
260 	vm_page_t m;
261 
262 	m = sf_buf_page(sf);
263 	sf_buf_free(sf);
264 	sched_unpin();
265 	vm_page_unwire(m, PQ_ACTIVE);
266 }
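/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * an image activator needing a short-lived kernel mapping of object
 * data would bracket its access with the two functions above, e.g.
 *
 *	struct sf_buf *sf;
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((char *)sf_buf_kva(sf) + (offset & PAGE_MASK), dst, len);
 *	vm_imgact_unmap_page(sf);
 *
 * where "dst" and "len" are hypothetical.  The page stays wired only
 * for the lifetime of the sf_buf, and the thread remains pinned to its
 * CPU between the two calls because the mapping is CPU private.
 */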
267 
268 void
269 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
270 {
271 
272 	pmap_sync_icache(map->pmap, va, sz);
273 }
274 
275 static vm_object_t kstack_object;
276 static vm_object_t kstack_alt_object;
277 static uma_zone_t kstack_cache;
278 static int kstack_cache_size;
279 static vmem_t *vmd_kstack_arena[MAXMEMDOM];
280 
281 static vm_pindex_t vm_kstack_pindex(vm_offset_t ks, int npages);
282 static vm_object_t vm_thread_kstack_size_to_obj(int npages);
283 static int vm_thread_stack_back(vm_offset_t kaddr, vm_page_t ma[], int npages,
284     int req_class, int domain);
285 
286 static int
287 sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
288 {
289 	int error, oldsize;
290 
291 	oldsize = kstack_cache_size;
292 	error = sysctl_handle_int(oidp, arg1, arg2, req);
293 	if (error == 0 && req->newptr && oldsize != kstack_cache_size)
294 		uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
295 	return (error);
296 }
297 SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,
298     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &kstack_cache_size, 0,
299     sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks");
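/*
 * The cache limit is therefore runtime-tunable, e.g. (illustrative):
 *
 *	# sysctl vm.kstack_cache_size=256
 *
 * The handler above only pokes UMA when the value actually changes.
 */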
300 
301 /*
302  *	Allocate a virtual address range from a domain kstack arena, following
303  *	the specified NUMA policy.
304  */
305 static vm_offset_t
306 vm_thread_alloc_kstack_kva(vm_size_t size, int domain)
307 {
308 #ifndef __ILP32__
309 	int rv;
310 	vmem_t *arena;
311 	vm_offset_t addr = 0;
312 
313 	size = round_page(size);
314 	/* Allocate from the kernel arena for non-standard kstack sizes. */
315 	if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) {
316 		arena = vm_dom[domain].vmd_kernel_arena;
317 	} else {
318 		arena = vmd_kstack_arena[domain];
319 	}
320 	rv = vmem_alloc(arena, size, M_BESTFIT | M_NOWAIT, &addr);
321 	if (rv == ENOMEM)
322 		return (0);
323 	KASSERT(atop(addr - VM_MIN_KERNEL_ADDRESS) %
324 	    (kstack_pages + KSTACK_GUARD_PAGES) == 0,
325 	    ("%s: allocated kstack KVA not aligned to multiple of kstack size",
326 	    __func__));
327 
328 	return (addr);
329 #else
330 	return (kva_alloc(size));
331 #endif
332 }
333 
334 /*
335  *	Release a region of kernel virtual memory
336  *	allocated from the kstack arena.
337  */
338 static __noinline void
339 vm_thread_free_kstack_kva(vm_offset_t addr, vm_size_t size, int domain)
340 {
341 	vmem_t *arena;
342 
343 	size = round_page(size);
344 #ifdef __ILP32__
345 	arena = kernel_arena;
346 #else
347 	arena = vmd_kstack_arena[domain];
348 	if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) {
349 		arena = vm_dom[domain].vmd_kernel_arena;
350 	}
351 #endif
352 	vmem_free(arena, addr, size);
353 }
354 
355 static vmem_size_t
356 vm_thread_kstack_import_quantum(void)
357 {
358 #ifndef __ILP32__
359 	/*
360 	 * The kstack_quantum is larger than KVA_QUANTUM to account
361 	 * for holes induced by guard pages.
362 	 */
363 	return (KVA_KSTACK_QUANTUM * (kstack_pages + KSTACK_GUARD_PAGES));
364 #else
365 	return (KVA_KSTACK_QUANTUM);
366 #endif
367 }
368 
369 /*
370  * Import KVA from a parent arena into the kstack arena. Imports must be
371  * a multiple of kernel stack pages + guard pages in size.
372  *
373  * Kstack VA allocations need to be aligned so that the linear KVA pindex
374  * is divisible by the total number of kstack VA pages. This is necessary to
375  * make vm_kstack_pindex work properly.
376  *
377  * We import a multiple of KVA_KSTACK_QUANTUM-sized region from the parent
378  * arena. The actual size used by the kstack arena is one kstack smaller to
379  * allow for the necessary alignment adjustments to be made.
380  */
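/*
 * Worked example with assumed values: take kstack_pages == 4 and
 * KSTACK_GUARD_PAGES == 1, so each stack slot spans kpages == 5 pages
 * of KVA, and suppose KVA_KSTACK_QUANTUM is 512 pages.  The import
 * below requests 5 * 512 pages from the parent arena, aligned to 512
 * pages.  If the address handed back lies, say, 2 pages past a multiple
 * of 5 (counted from VM_MIN_KERNEL_ADDRESS), *addrp is bumped forward
 * by 3 pages so that every slot starts on a multiple of 5 pages.  The
 * kstack arena's import quantum is registered one full slot (5 pages)
 * smaller in kstack_cache_init(), so the bump never walks off the end
 * of the imported region.
 */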
381 static int
382 vm_thread_kstack_arena_import(void *arena, vmem_size_t size, int flags,
383     vmem_addr_t *addrp)
384 {
385 	int error, rem;
386 	size_t kpages = kstack_pages + KSTACK_GUARD_PAGES;
387 
388 	KASSERT(atop(size) % kpages == 0,
389 	    ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
390 	    (intmax_t)size, (int)kpages));
391 
392 	error = vmem_xalloc(arena, vm_thread_kstack_import_quantum(),
393 	    KVA_KSTACK_QUANTUM, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags,
394 	    addrp);
395 	if (error) {
396 		return (error);
397 	}
398 
399 	rem = atop(*addrp - VM_MIN_KERNEL_ADDRESS) % kpages;
400 	if (rem != 0) {
401 		/* Bump addr to next aligned address */
402 		*addrp = *addrp + (kpages - rem) * PAGE_SIZE;
403 	}
404 
405 	return (0);
406 }
407 
408 /*
409  * Release KVA from a parent arena into the kstack arena. Released imports must
410  * be a multiple of kernel stack pages + guard pages in size.
411  */
412 static void
413 vm_thread_kstack_arena_release(void *arena, vmem_addr_t addr, vmem_size_t size)
414 {
415 	int rem;
416 	size_t kpages __diagused = kstack_pages + KSTACK_GUARD_PAGES;
417 
418 	KASSERT(size % kpages == 0,
419 	    ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__,
420 	    (intmax_t)size, (int)kpages));
421 
422 	KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % kpages == 0,
423 	    ("%s: Address %p is not properly aligned (%p)", __func__,
424 		(void *)addr, (void *)VM_MIN_KERNEL_ADDRESS));
425 	/*
426 	 * If the address is not KVA_KSTACK_QUANTUM-aligned we have to decrement
427 	 * it to account for the shift in kva_import_kstack.
428 	 */
429 	rem = addr % KVA_KSTACK_QUANTUM;
430 	if (rem) {
431 		KASSERT(rem <= ptoa(kpages),
432 		    ("%s: rem > kpages (%d), (%d)", __func__, rem,
433 			(int)kpages));
434 		addr -= rem;
435 	}
436 	vmem_xfree(arena, addr, vm_thread_kstack_import_quantum());
437 }
438 
439 /*
440  * Create the kernel stack for a new thread.
441  */
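/*
 * A minimal sketch of the KVA layout produced below, assuming
 * KSTACK_GUARD_PAGES == 1 and pages == 4:
 *
 *	base         base + ptoa(1)                          base + ptoa(5)
 *	 |<- guard ->|<-------- 4 mapped stack pages -------->|
 *
 * The value returned to the caller is base + ptoa(KSTACK_GUARD_PAGES),
 * i.e. the lowest mapped stack page.  The guard page(s) below it are
 * left unmapped (pmap_qremove()) so that running off the bottom of the
 * stack faults instead of silently overwriting the adjacent allocation.
 */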
442 static vm_offset_t
443 vm_thread_stack_create(struct domainset *ds, int pages)
444 {
445 	vm_page_t ma[KSTACK_MAX_PAGES];
446 	struct vm_domainset_iter di;
447 	int req = VM_ALLOC_NORMAL;
448 	vm_object_t obj;
449 	vm_offset_t ks;
450 	int domain, i;
451 
452 	obj = vm_thread_kstack_size_to_obj(pages);
453 	if (vm_ndomains > 1)
454 		obj->domain.dr_policy = ds;
455 	vm_domainset_iter_page_init(&di, obj, 0, &domain, &req);
456 	do {
457 		/*
458 		 * Get a kernel virtual address for this thread's kstack.
459 		 */
460 		ks = vm_thread_alloc_kstack_kva(ptoa(pages + KSTACK_GUARD_PAGES),
461 		    domain);
462 		if (ks == 0)
463 			continue;
464 		ks += ptoa(KSTACK_GUARD_PAGES);
465 
466 		/*
467 		 * Allocate physical pages to back the stack.
468 		 */
469 		if (vm_thread_stack_back(ks, ma, pages, req, domain) != 0) {
470 			vm_thread_free_kstack_kva(ks - ptoa(KSTACK_GUARD_PAGES),
471 			    ptoa(pages + KSTACK_GUARD_PAGES), domain);
472 			continue;
473 		}
474 		if (KSTACK_GUARD_PAGES != 0) {
475 			pmap_qremove(ks - ptoa(KSTACK_GUARD_PAGES),
476 			    KSTACK_GUARD_PAGES);
477 		}
478 		for (i = 0; i < pages; i++)
479 			vm_page_valid(ma[i]);
480 		pmap_qenter(ks, ma, pages);
481 		return (ks);
482 	} while (vm_domainset_iter_page(&di, obj, &domain) == 0);
483 
484 	return (0);
485 }
486 
487 static __noinline void
488 vm_thread_stack_dispose(vm_offset_t ks, int pages)
489 {
490 	vm_page_t m;
491 	vm_pindex_t pindex;
492 	int i, domain;
493 	vm_object_t obj = vm_thread_kstack_size_to_obj(pages);
494 
495 	pindex = vm_kstack_pindex(ks, pages);
496 	domain = vm_phys_domain(vtophys(ks));
497 	pmap_qremove(ks, pages);
498 	VM_OBJECT_WLOCK(obj);
499 	for (i = 0; i < pages; i++) {
500 		m = vm_page_lookup(obj, pindex + i);
501 		if (m == NULL)
502 			panic("%s: kstack already missing?", __func__);
503 		KASSERT(vm_page_domain(m) == domain,
504 		    ("%s: page %p domain mismatch, expected %d got %d",
505 		    __func__, m, domain, vm_page_domain(m)));
506 		vm_page_xbusy_claim(m);
507 		vm_page_unwire_noq(m);
508 		vm_page_free(m);
509 	}
510 	VM_OBJECT_WUNLOCK(obj);
511 	kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
512 	vm_thread_free_kstack_kva(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
513 	    ptoa(pages + KSTACK_GUARD_PAGES), domain);
514 }
515 
516 /*
517  * Allocate the kernel stack for a new thread.
518  */
519 int
520 vm_thread_new(struct thread *td, int pages)
521 {
522 	vm_offset_t ks;
523 	u_short ks_domain;
524 
525 	/* Bounds check */
526 	if (pages <= 1)
527 		pages = kstack_pages;
528 	else if (pages > KSTACK_MAX_PAGES)
529 		pages = KSTACK_MAX_PAGES;
530 
531 	ks = 0;
532 	if (pages == kstack_pages && kstack_cache != NULL)
533 		ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);
534 
535 	/*
536 	 * Ensure that kstack objects can draw pages from any memory
537 	 * domain.  Otherwise a local memory shortage can block a process
538 	 * swap-in.
539 	 */
540 	if (ks == 0)
541 		ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
542 		    pages);
543 	if (ks == 0)
544 		return (0);
545 
546 	ks_domain = vm_phys_domain(vtophys(ks));
547 	KASSERT(ks_domain >= 0 && ks_domain < vm_ndomains,
548 	    ("%s: invalid domain for kstack %p", __func__, (void *)ks));
549 	td->td_kstack = ks;
550 	td->td_kstack_pages = pages;
551 	td->td_kstack_domain = ks_domain;
552 	return (1);
553 }
554 
555 /*
556  * Dispose of a thread's kernel stack.
557  */
558 void
559 vm_thread_dispose(struct thread *td)
560 {
561 	vm_offset_t ks;
562 	int pages;
563 
564 	pages = td->td_kstack_pages;
565 	ks = td->td_kstack;
566 	td->td_kstack = 0;
567 	td->td_kstack_pages = 0;
568 	td->td_kstack_domain = MAXMEMDOM;
569 	if (pages == kstack_pages) {
570 		kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
571 		uma_zfree(kstack_cache, (void *)ks);
572 	} else {
573 		vm_thread_stack_dispose(ks, pages);
574 	}
575 }
576 
577 /*
578  * Calculate kstack pindex.
579  *
580  * Uses a non-identity mapping if guard pages are
581  * active to avoid pindex holes in the kstack object.
582  */
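/*
 * Worked example, assuming kstack_pages == 4 and KSTACK_GUARD_PAGES == 1
 * (each KVA slot is 5 pages, guard first): the stack occupying slot
 * n == 2 has its first backing page at linear pindex 2 * 5 + 1 == 11.
 * Below, 11 / 5 == 2, so the returned pindex is 11 - (2 + 1) * 1 == 8,
 * which is exactly n * kstack_pages.  The four stack pages therefore
 * land at object pindexes 8..11, with no holes reserved for guard pages.
 */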
583 static vm_pindex_t
584 vm_kstack_pindex(vm_offset_t ks, int kpages)
585 {
586 	vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);
587 
588 #ifdef __ILP32__
589 	return (pindex);
590 #else
591 	/*
592 	 * Return the linear pindex if guard pages aren't active or if we are
593 	 * allocating a non-standard kstack size.
594 	 */
595 	if (KSTACK_GUARD_PAGES == 0 || kpages != kstack_pages) {
596 		return (pindex);
597 	}
598 	KASSERT(pindex % (kpages + KSTACK_GUARD_PAGES) >= KSTACK_GUARD_PAGES,
599 	    ("%s: Attempting to calculate kstack guard page pindex", __func__));
600 
601 	return (pindex -
602 	    (pindex / (kpages + KSTACK_GUARD_PAGES) + 1) * KSTACK_GUARD_PAGES);
603 #endif
604 }
605 
606 /*
607  * Allocate physical pages, following the specified NUMA policy, to back a
608  * kernel stack.
609  */
610 static int
611 vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class,
612     int domain)
613 {
614 	vm_object_t obj = vm_thread_kstack_size_to_obj(npages);
615 	vm_pindex_t pindex;
616 	vm_page_t m;
617 	int n;
618 
619 	pindex = vm_kstack_pindex(ks, npages);
620 
621 	VM_OBJECT_WLOCK(obj);
622 	for (n = 0; n < npages;) {
623 		m = vm_page_grab(obj, pindex + n,
624 		    VM_ALLOC_NOCREAT | VM_ALLOC_WIRED);
625 		if (m == NULL) {
626 			m = n > 0 ? ma[n - 1] : vm_page_mpred(obj, pindex);
627 			m = vm_page_alloc_domain_after(obj, pindex + n, domain,
628 			    req_class | VM_ALLOC_WIRED, m);
629 		}
630 		if (m == NULL)
631 			break;
632 		ma[n++] = m;
633 	}
634 	if (n < npages)
635 		goto cleanup;
636 	VM_OBJECT_WUNLOCK(obj);
637 
638 	return (0);
639 cleanup:
640 	for (int i = 0; i < n; i++) {
641 		m = ma[i];
642 		(void)vm_page_unwire_noq(m);
643 		vm_page_free(m);
644 	}
645 	VM_OBJECT_WUNLOCK(obj);
646 
647 	return (ENOMEM);
648 }
649 
650 static vm_object_t
651 vm_thread_kstack_size_to_obj(int npages)
652 {
653 	return (npages == kstack_pages ? kstack_object : kstack_alt_object);
654 }
655 
656 static int
657 kstack_import(void *arg, void **store, int cnt, int domain, int flags)
658 {
659 	struct domainset *ds;
660 	int i;
661 
662 	if (domain == UMA_ANYDOMAIN)
663 		ds = DOMAINSET_RR();
664 	else
665 		ds = DOMAINSET_PREF(domain);
666 
667 	for (i = 0; i < cnt; i++) {
668 		store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
669 		if (store[i] == NULL)
670 			break;
671 	}
672 	return (i);
673 }
674 
675 static void
676 kstack_release(void *arg, void **store, int cnt)
677 {
678 	vm_offset_t ks;
679 	int i;
680 
681 	for (i = 0; i < cnt; i++) {
682 		ks = (vm_offset_t)store[i];
683 		vm_thread_stack_dispose(ks, kstack_pages);
684 	}
685 }
686 
687 static void
688 kstack_cache_init(void *null)
689 {
690 	vm_size_t kstack_quantum;
691 	int domain;
692 
693 	kstack_object = vm_object_allocate(OBJT_PHYS,
694 	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
695 	kstack_cache = uma_zcache_create("kstack_cache",
696 	    kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
697 	    kstack_import, kstack_release, NULL,
698 	    UMA_ZONE_FIRSTTOUCH);
699 	kstack_cache_size = imax(128, mp_ncpus * 4);
700 	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
701 
702 	kstack_alt_object = vm_object_allocate(OBJT_PHYS,
703 	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
704 
705 	kstack_quantum = vm_thread_kstack_import_quantum();
706 	/*
707 	 * Reduce size used by the kstack arena to allow for
708 	 * alignment adjustments in vm_thread_kstack_arena_import.
709 	 */
710 	kstack_quantum -= (kstack_pages + KSTACK_GUARD_PAGES) * PAGE_SIZE;
711 	/*
712 	 * Create the kstack_arena for each domain and set kernel_arena as
713 	 * parent.
714 	 */
715 	for (domain = 0; domain < vm_ndomains; domain++) {
716 		vmd_kstack_arena[domain] = vmem_create("kstack arena", 0, 0,
717 		    PAGE_SIZE, 0, M_WAITOK);
718 		KASSERT(vmd_kstack_arena[domain] != NULL,
719 		    ("%s: failed to create domain %d kstack_arena", __func__,
720 		    domain));
721 		vmem_set_import(vmd_kstack_arena[domain],
722 		    vm_thread_kstack_arena_import,
723 		    vm_thread_kstack_arena_release,
724 		    vm_dom[domain].vmd_kernel_arena, kstack_quantum);
725 	}
726 }
727 SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);
728 
729 #ifdef KSTACK_USAGE_PROF
730 /*
731  * Track maximum stack used by a thread in kernel.
732  */
733 static int max_kstack_used;
734 
735 SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
736     &max_kstack_used, 0,
737     "Maximum stack depth used by a thread in kernel");
738 
739 void
740 intr_prof_stack_use(struct thread *td, struct trapframe *frame)
741 {
742 	vm_offset_t stack_top;
743 	vm_offset_t current;
744 	int used, prev_used;
745 
746 	/*
747 	 * Testing for interrupted kernel mode isn't strictly
748 	 * needed. It optimizes the execution, since interrupts from
749 	 * usermode will have only the trap frame on the stack.
750 	 */
751 	if (TRAPF_USERMODE(frame))
752 		return;
753 
754 	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
755 	current = (vm_offset_t)(uintptr_t)&stack_top;
756 
757 	/*
758 	 * Try to detect if interrupt is using kernel thread stack.
759 	 * Hardware could use a dedicated stack for interrupt handling.
760 	 */
761 	if (stack_top <= current || current < td->td_kstack)
762 		return;
763 
764 	used = stack_top - current;
765 	for (;;) {
766 		prev_used = max_kstack_used;
767 		if (prev_used >= used)
768 			break;
769 		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
770 			break;
771 	}
772 }
773 #endif /* KSTACK_USAGE_PROF */
774 
775 /*
776  * Implement fork's actions on an address space.
777  * Here we arrange for the address space to be copied or referenced,
778  * allocate a user struct (pcb and kernel stack), then call the
779  * machine-dependent layer to fill those in and make the new process
780  * ready to run.  The new process is set up so that it returns directly
781  * to user mode to avoid stack copying and relocation problems.
782  */
783 int
784 vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
785     struct vmspace *vm2, int flags)
786 {
787 	struct proc *p1 = td->td_proc;
788 	struct domainset *dset;
789 	int error;
790 
791 	if ((flags & RFPROC) == 0) {
792 		/*
793 		 * Divorce the memory if it is shared: this turns the
794 		 * memory shared amongst threads into copy-on-write (COW)
795 		 * locally.
796 		 */
797 		if ((flags & RFMEM) == 0) {
798 			error = vmspace_unshare(p1);
799 			if (error)
800 				return (error);
801 		}
802 		cpu_fork(td, p2, td2, flags);
803 		return (0);
804 	}
805 
806 	if (flags & RFMEM) {
807 		p2->p_vmspace = p1->p_vmspace;
808 		refcount_acquire(&p1->p_vmspace->vm_refcnt);
809 	}
810 	dset = td2->td_domain.dr_policy;
811 	while (vm_page_count_severe_set(&dset->ds_mask)) {
812 		vm_wait_doms(&dset->ds_mask, 0);
813 	}
814 
815 	if ((flags & RFMEM) == 0) {
816 		p2->p_vmspace = vm2;
817 		if (p1->p_vmspace->vm_shm)
818 			shmfork(p1, p2);
819 	}
820 
821 	/*
822 	 * cpu_fork will copy and update the pcb, set up the kernel stack,
823 	 * and make the child ready to run.
824 	 */
825 	cpu_fork(td, p2, td2, flags);
826 	return (0);
827 }
828 
829 /*
830  * Called after process has been wait(2)'ed upon and is being reaped.
831  * The idea is to reclaim resources that we could not reclaim while
832  * the process was still executing.
833  */
834 void
835 vm_waitproc(struct proc *p)
836 {
837 
838 	vmspace_exitfree(p);		/* and clean-out the vmspace */
839 }
840