xref: /freebsd/sys/vm/vm_kern.c (revision cd2512eaab5cd402a0177a8078b9234b215b39bd)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60 
61 /*
62  *	Kernel memory management.
63  */
64 
65 #include <sys/cdefs.h>
66 #include "opt_vm.h"
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/asan.h>
71 #include <sys/domainset.h>
72 #include <sys/eventhandler.h>
73 #include <sys/kernel.h>
74 #include <sys/lock.h>
75 #include <sys/malloc.h>
76 #include <sys/msan.h>
77 #include <sys/proc.h>
78 #include <sys/rwlock.h>
79 #include <sys/smp.h>
80 #include <sys/sysctl.h>
81 #include <sys/vmem.h>
82 #include <sys/vmmeter.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_domainset.h>
87 #include <vm/vm_kern.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_pagequeue.h>
94 #include <vm/vm_phys.h>
95 #include <vm/vm_radix.h>
96 #include <vm/vm_extern.h>
97 #include <vm/uma.h>
98 
99 struct vm_map kernel_map_store;
100 struct vm_map exec_map_store;
101 struct vm_map pipe_map_store;
102 
103 const void *zero_region;
104 CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
105 
106 /* NB: Used by kernel debuggers. */
107 const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;
108 
109 u_int exec_map_entry_size;
110 u_int exec_map_guard_pages;
111 u_int exec_map_entries;
112 
113 SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
114 #if defined(__amd64__)
115     &kva_layout.km_low, 0,
116 #else
117     SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS,
118 #endif
119     "Min kernel address");
120 
121 SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
122 #if defined(__arm__)
123     &vm_max_kernel_address, 0,
124 #elif defined(__amd64__)
125     &kva_layout.km_high, 0,
126 #else
127     SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
128 #endif
129     "Max kernel address");
130 
131 #if VM_NRESERVLEVEL > 1
132 #define	KVA_QUANTUM_SHIFT	(VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER + \
133     PAGE_SHIFT)
134 #elif VM_NRESERVLEVEL > 0
135 #define	KVA_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
136 #else
137 /* On non-superpage architectures we want large import sizes. */
138 #define	KVA_QUANTUM_SHIFT	(8 + PAGE_SHIFT)
139 #endif
140 #define	KVA_QUANTUM		(1ul << KVA_QUANTUM_SHIFT)
141 #define	KVA_NUMA_IMPORT_QUANTUM	(KVA_QUANTUM * 128)
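/*
 * For a sense of scale (illustrative; the constants are architecture-
 * dependent): on a configuration with VM_NRESERVLEVEL == 1,
 * VM_LEVEL_0_ORDER == 9, and PAGE_SHIFT == 12, as traditionally on amd64,
 * KVA_QUANTUM_SHIFT is 9 + 12 = 21, so KVA_QUANTUM is one 2MB superpage
 * and KVA_NUMA_IMPORT_QUANTUM is 256MB.
 */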
142 
143 extern void     uma_startup2(void);
144 
145 /*
146  *	kva_alloc:
147  *
148  *	Allocate a virtual address range with no underlying object and
149  *	no initial mapping to physical memory.  Any mapping from this
150  *	range to physical memory must be explicitly created prior to
151  *	its use, typically with pmap_qenter().  Any attempt to create
152  *	a mapping on demand through vm_fault() will result in a panic.
153  */
154 void *
155 kva_alloc(vm_size_t size)
156 {
157 	vmem_addr_t addr;
158 
159 	TSENTER();
160 	size = round_page(size);
161 	if (vmem_xalloc(kernel_arena, size, 0, 0, 0, VMEM_ADDR_MIN,
162 	    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr))
163 		return (NULL);
164 	TSEXIT();
165 
166 	return ((void *)addr);
167 }
168 
169 /*
170  *	kva_alloc_aligned:
171  *
172  *	Allocate a virtual address range as in kva_alloc where the base
173  *	address is aligned to align.
174  */
175 void *
176 kva_alloc_aligned(vm_size_t size, vm_size_t align)
177 {
178 	vmem_addr_t addr;
179 
180 	TSENTER();
181 	size = round_page(size);
182 	if (vmem_xalloc(kernel_arena, size, align, 0, 0, VMEM_ADDR_MIN,
183 	    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr))
184 		return (NULL);
185 	TSEXIT();
186 
187 	return ((void *)addr);
188 }
189 
190 /*
191  *	kva_free:
192  *
193  *	Release a region of kernel virtual memory allocated
194  *	with kva_alloc.  No physical pages are freed; the caller must
195  *	remove any mappings created within the region beforehand.
196  *
197  *	This routine may not block on kernel maps.
198  */
199 void
200 kva_free(void *addr, vm_size_t size)
201 {
202 
203 	size = round_page(size);
204 	vmem_xfree(kernel_arena, (uintptr_t)addr, size);
205 }
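/*
 * Illustrative sketch (not part of this file): a typical consumer pairs
 * kva_alloc() with explicit pmap_qenter()/pmap_qremove() calls, since the
 * range has no backing object and must not be touched before a mapping is
 * created.  The helper names and page source here are hypothetical:
 *
 *	void *
 *	map_pages_tmp(vm_page_t *ma, int npages)
 *	{
 *		void *va;
 *
 *		va = kva_alloc(ptoa(npages));
 *		if (va == NULL)
 *			return (NULL);
 *		pmap_qenter((vm_offset_t)va, ma, npages);
 *		return (va);
 *	}
 *
 *	void
 *	unmap_pages_tmp(void *va, int npages)
 *	{
 *		pmap_qremove((vm_offset_t)va, npages);
 *		kva_free(va, ptoa(npages));
 *	}
 */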
206 
207 /*
208  * Update sanitizer shadow state to reflect a new allocation.  Force inlining to
209  * help make KMSAN origin tracking more precise.
210  */
211 static __always_inline void
212 kmem_alloc_san(vm_offset_t addr, vm_size_t size, vm_size_t asize, int flags)
213 {
214 	if ((flags & M_ZERO) == 0) {
215 		kmsan_mark((void *)addr, asize, KMSAN_STATE_UNINIT);
216 		kmsan_orig((void *)addr, asize, KMSAN_TYPE_KMEM,
217 		    KMSAN_RET_ADDR);
218 	} else {
219 		kmsan_mark((void *)addr, asize, KMSAN_STATE_INITED);
220 	}
221 	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
222 }
223 
224 static vm_page_t
225 kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
226     int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high,
227     u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
228 {
229 	vm_page_t m;
230 	int tries;
231 	bool wait, reclaim;
232 
233 	VM_OBJECT_ASSERT_WLOCKED(object);
234 
235 	wait = (pflags & VM_ALLOC_WAITOK) != 0;
236 	reclaim = (pflags & VM_ALLOC_NORECLAIM) == 0;
237 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
238 	pflags |= VM_ALLOC_NOWAIT;
239 	for (tries = wait ? 3 : 1;; tries--) {
240 		m = vm_page_alloc_contig_domain(object, pindex, domain, pflags,
241 		    npages, low, high, alignment, boundary, memattr);
242 		if (m != NULL || tries == 0 || !reclaim)
243 			break;
244 
245 		VM_OBJECT_WUNLOCK(object);
246 		if (vm_page_reclaim_contig_domain(domain, pflags, npages,
247 		    low, high, alignment, boundary) == ENOMEM && wait)
248 			vm_wait_domain(domain);
249 		VM_OBJECT_WLOCK(object);
250 	}
251 	return (m);
252 }
253 
254 /*
255  *	Allocates a region from the kernel address map and physical pages
256  *	within the specified address range to the kernel object.  Creates a
257  *	wired mapping from this region to these pages, and returns the
258  *	region's starting virtual address.  The allocated pages are not
259  *	necessarily physically contiguous.  If M_ZERO is specified through the
260  *	given flags, then the pages are zeroed before they are mapped.
261  */
262 static void *
263 kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
264     vm_paddr_t high, vm_memattr_t memattr)
265 {
266 	vmem_t *vmem;
267 	vm_object_t object;
268 	vm_offset_t addr, i, offset;
269 	vm_page_t m;
270 	vm_size_t asize;
271 	int pflags;
272 	vm_prot_t prot;
273 	u_int pmap_enter_flags;
274 
275 	object = kernel_object;
276 	asize = round_page(size);
277 	vmem = vm_dom[domain].vmd_kernel_arena;
278 	if (vmem_alloc(vmem, asize, M_BESTFIT | flags, &addr))
279 		return (NULL);
280 	offset = addr - VM_MIN_KERNEL_ADDRESS;
281 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
282 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
283 
284 	pmap_enter_flags = prot | PMAP_ENTER_WIRED;
285 	if ((flags & M_UNPROTECTED) != 0)
286 		pmap_enter_flags |= PMAP_ENTER_UNPROTECTED;
287 
288 	VM_OBJECT_WLOCK(object);
289 	for (i = 0; i < asize; i += PAGE_SIZE) {
290 		m = kmem_alloc_contig_pages(object, atop(offset + i),
291 		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
292 		if (m == NULL) {
293 			VM_OBJECT_WUNLOCK(object);
294 			kmem_unback(object, addr, i);
295 			vmem_free(vmem, addr, asize);
296 			return (NULL);
297 		}
298 		KASSERT(vm_page_domain(m) == domain,
299 		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
300 		    vm_page_domain(m), domain));
301 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
302 			pmap_zero_page(m);
303 		vm_page_valid(m);
304 		pmap_enter(kernel_pmap, addr + i, m, prot,
305 		    pmap_enter_flags, 0);
306 	}
307 	VM_OBJECT_WUNLOCK(object);
308 	kmem_alloc_san(addr, size, asize, flags);
309 	return ((void *)addr);
310 }
311 
312 void *
313 kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
314     vm_memattr_t memattr)
315 {
316 
317 	return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low,
318 	    high, memattr));
319 }
320 
321 void *
322 kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
323     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
324 {
325 	struct vm_domainset_iter di;
326 	vm_page_t bounds[2];
327 	void *addr;
328 	int domain;
329 	int start_segind;
330 
331 	start_segind = -1;
332 
333 	if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags) != 0)
334 		return (NULL);
335 
336 	do {
337 		addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
338 		    memattr);
339 		if (addr != NULL)
340 			break;
341 		if (start_segind == -1)
342 			start_segind = vm_phys_lookup_segind(low);
343 		if (vm_phys_find_range(bounds, start_segind, domain,
344 		    atop(round_page(size)), low, high) == -1) {
345 			vm_domainset_iter_ignore(&di, domain);
346 		}
347 	} while (vm_domainset_iter_policy(&di, &domain) == 0);
348 
349 	return (addr);
350 }
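/*
 * Illustrative sketch: a caller needing pages restricted to the low 4GB
 * of physical memory with a non-default memory attribute might use the
 * wrapper above as follows (size and attribute are hypothetical):
 *
 *	buf = kmem_alloc_attr(bufsize, M_WAITOK | M_ZERO, 0,
 *	    (vm_paddr_t)0xffffffff, VM_MEMATTR_UNCACHEABLE);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	kmem_free(buf, bufsize);
 */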
351 
352 /*
353  *	Allocates a region from the kernel address map and physically
354  *	contiguous pages within the specified address range to the kernel
355  *	object.  Creates a wired mapping from this region to these pages, and
356  *	returns the region's starting virtual address.  If M_ZERO is specified
357  *	through the given flags, then the pages are zeroed before they are
358  *	mapped.
359  */
360 static void *
361 kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
362     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
363     vm_memattr_t memattr)
364 {
365 	vmem_t *vmem;
366 	vm_object_t object;
367 	vm_offset_t addr, offset, tmp;
368 	vm_page_t end_m, m;
369 	vm_size_t asize;
370 	u_long npages;
371 	int pflags;
372 	u_int pmap_enter_flags;
373 
374 	object = kernel_object;
375 	asize = round_page(size);
376 	vmem = vm_dom[domain].vmd_kernel_arena;
377 	if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
378 		return (NULL);
379 	offset = addr - VM_MIN_KERNEL_ADDRESS;
380 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
381 	npages = atop(asize);
382 	VM_OBJECT_WLOCK(object);
383 	m = kmem_alloc_contig_pages(object, atop(offset), domain,
384 	    pflags, npages, low, high, alignment, boundary, memattr);
385 	if (m == NULL) {
386 		VM_OBJECT_WUNLOCK(object);
387 		vmem_free(vmem, addr, asize);
388 		return (NULL);
389 	}
390 	KASSERT(vm_page_domain(m) == domain,
391 	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
392 	    vm_page_domain(m), domain));
393 	end_m = m + npages;
394 	tmp = addr;
395 
396 	pmap_enter_flags = VM_PROT_RW | PMAP_ENTER_WIRED;
397 	if ((flags & M_UNPROTECTED) != 0)
398 		pmap_enter_flags |= PMAP_ENTER_UNPROTECTED;
399 
400 	for (; m < end_m; m++) {
401 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
402 			pmap_zero_page(m);
403 		vm_page_valid(m);
404 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
405 		    pmap_enter_flags, 0);
406 		tmp += PAGE_SIZE;
407 	}
408 	VM_OBJECT_WUNLOCK(object);
409 	kmem_alloc_san(addr, size, asize, flags);
410 	return ((void *)addr);
411 }
412 
413 void *
414 kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
415     u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
416 {
417 
418 	return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low,
419 	    high, alignment, boundary, memattr));
420 }
421 
422 void *
423 kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
424     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
425     vm_memattr_t memattr)
426 {
427 	struct vm_domainset_iter di;
428 	vm_page_t bounds[2];
429 	void *addr;
430 	int domain;
431 	int start_segind;
432 
433 	start_segind = -1;
434 
435 	if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags))
436 		return (NULL);
437 
438 	do {
439 		addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
440 		    alignment, boundary, memattr);
441 		if (addr != NULL)
442 			break;
443 		if (start_segind == -1)
444 			start_segind = vm_phys_lookup_segind(low);
445 		if (vm_phys_find_range(bounds, start_segind, domain,
446 		    atop(round_page(size)), low, high) == -1) {
447 			vm_domainset_iter_ignore(&di, domain);
448 		}
449 	} while (vm_domainset_iter_policy(&di, &domain) == 0);
450 
451 	return (addr);
452 }
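/*
 * Illustrative sketch: a physically contiguous DMA descriptor ring that
 * must be page-aligned and must not span a 4GB boundary could be obtained
 * with (names and sizes are hypothetical):
 *
 *	ring = kmem_alloc_contig(ring_size, M_WAITOK | M_ZERO, 0,
 *	    BUS_SPACE_MAXADDR, PAGE_SIZE, (vm_paddr_t)1 << 32,
 *	    VM_MEMATTR_DEFAULT);
 *	...
 *	kmem_free(ring, ring_size);
 */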
453 
454 /*
455  *	kmem_subinit:
456  *
457  *	Initializes a map to manage a subrange
458  *	of the kernel virtual address space.
459  *
460  *	Arguments are as follows:
461  *
462  *	parent		Map to take range from
463  *	min, max	Returned endpoints of map
464  *	size		Size of range to find
465  *	superpage_align	Request that min is superpage aligned
466  */
467 void
468 kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
469     vm_size_t size, bool superpage_align)
470 {
471 	int ret;
472 
473 	size = round_page(size);
474 
475 	*min = vm_map_min(parent);
476 	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
477 	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
478 	    MAP_ACC_NO_CHARGE);
479 	if (ret != KERN_SUCCESS)
480 		panic("kmem_subinit: bad status return of %d", ret);
481 	*max = *min + size;
482 	vm_map_init(map, vm_map_pmap(parent), *min, *max);
483 	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
484 		panic("kmem_subinit: unable to change range to submap");
485 }
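/*
 * Illustrative sketch, modeled on how vm_init.c carves the exec and pipe
 * submaps out of kernel_map at boot (the size expression here is an
 * approximation of the real one):
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
 *	    exec_map_entries * (exec_map_entry_size +
 *	    2 * exec_map_guard_pages * PAGE_SIZE), false);
 */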
486 
487 /*
488  *	kmem_malloc_domain:
489  *
490  *	Allocate wired-down pages in the kernel's address space.
491  */
492 static void *
493 kmem_malloc_domain(int domain, vm_size_t size, int flags)
494 {
495 	vmem_t *arena;
496 	vm_offset_t addr;
497 	vm_size_t asize;
498 	int rv;
499 
500 	if (__predict_true((flags & (M_EXEC | M_NEVERFREED)) == 0))
501 		arena = vm_dom[domain].vmd_kernel_arena;
502 	else if ((flags & M_EXEC) != 0)
503 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
504 	else
505 		arena = vm_dom[domain].vmd_kernel_nofree_arena;
506 	asize = round_page(size);
507 	if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
508 		return (NULL);
509 
510 	rv = kmem_back_domain(domain, kernel_object, addr, asize, flags);
511 	if (rv != KERN_SUCCESS) {
512 		vmem_free(arena, addr, asize);
513 		return (NULL);
514 	}
515 	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
516 	return ((void *)addr);
517 }
518 
519 void *
520 kmem_malloc(vm_size_t size, int flags)
521 {
522 	void *p;
523 
524 	TSENTER();
525 	p = kmem_malloc_domainset(DOMAINSET_RR(), size, flags);
526 	TSEXIT();
527 	return (p);
528 }
529 
530 void *
531 kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
532 {
533 	struct vm_domainset_iter di;
534 	void *addr;
535 	int domain;
536 
537 	if (vm_domainset_iter_policy_init(&di, ds, &domain, &flags) != 0)
538 		return (NULL);
539 
540 	do {
541 		addr = kmem_malloc_domain(domain, size, flags);
542 		if (addr != NULL)
543 			break;
544 	} while (vm_domainset_iter_policy(&di, &domain) == 0);
545 
546 	return (addr);
547 }
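/*
 * Illustrative sketch: kmem_malloc() and kmem_free() must be passed the
 * same size, since the underlying vmem allocation is identified by
 * (address, size):
 *
 *	void *p;
 *
 *	p = kmem_malloc(size, M_WAITOK | M_ZERO);
 *	...use p...
 *	kmem_free(p, size);
 */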
548 
549 /*
550  *	kmem_back_domain:
551  *
552  *	Allocate physical pages from the specified domain for the specified
553  *	virtual address range.
554  */
555 int
556 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
557     vm_size_t size, int flags)
558 {
559 	struct pctrie_iter pages;
560 	vm_offset_t offset, i;
561 	vm_page_t m;
562 	vm_prot_t prot;
563 	int pflags;
564 	u_int pmap_enter_flags;
565 
566 	KASSERT(object == kernel_object,
567 	    ("kmem_back_domain: only supports kernel object."));
568 
569 	offset = addr - VM_MIN_KERNEL_ADDRESS;
570 	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
571 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
572 	if (flags & M_WAITOK)
573 		pflags |= VM_ALLOC_WAITFAIL;
574 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
575 
576 	pmap_enter_flags = prot | PMAP_ENTER_WIRED;
577 	if ((flags & M_UNPROTECTED) != 0)
578 		pmap_enter_flags |= PMAP_ENTER_UNPROTECTED;
579 
580 	i = 0;
581 	vm_page_iter_init(&pages, object);
582 	VM_OBJECT_WLOCK(object);
583 retry:
584 	for (; i < size; i += PAGE_SIZE) {
585 		m = vm_page_alloc_domain_iter(object, atop(offset + i),
586 		    domain, pflags, &pages);
587 
588 		/*
589 		 * Ran out of space, free everything up and return. Don't need
590 		 * to lock page queues here as we know that the pages we got
591 		 * aren't on any queues.
592 		 */
593 		if (m == NULL) {
594 			if ((flags & M_NOWAIT) == 0)
595 				goto retry;
596 			VM_OBJECT_WUNLOCK(object);
597 			kmem_unback(object, addr, i);
598 			return (KERN_NO_SPACE);
599 		}
600 		KASSERT(vm_page_domain(m) == domain,
601 		    ("kmem_back_domain: Domain mismatch %d != %d",
602 		    vm_page_domain(m), domain));
603 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
604 			pmap_zero_page(m);
605 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
606 		    ("kmem_malloc: page %p is managed", m));
607 		vm_page_valid(m);
608 		pmap_enter(kernel_pmap, addr + i, m, prot,
609 		    pmap_enter_flags, 0);
610 		if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
611 			m->oflags |= VPO_KMEM_EXEC;
612 	}
613 	VM_OBJECT_WUNLOCK(object);
614 	kmem_alloc_san(addr, size, size, flags);
615 	return (KERN_SUCCESS);
616 }
617 
618 /*
619  *	kmem_back:
620  *
621  *	Allocate physical pages for the specified virtual address range.
622  */
623 int
624 kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
625 {
626 	vm_offset_t end, next, start;
627 	int domain, rv;
628 
629 	KASSERT(object == kernel_object,
630 	    ("kmem_back: only supports kernel object."));
631 
632 	for (start = addr, end = addr + size; addr < end; addr = next) {
633 		/*
634 		 * We must ensure that pages backing a given large virtual page
635 		 * all come from the same physical domain.
636 		 */
637 		if (vm_ndomains > 1) {
638 			domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
639 			while (VM_DOMAIN_EMPTY(domain))
640 				domain++;
641 			next = roundup2(addr + 1, KVA_QUANTUM);
642 			if (next > end || next < start)
643 				next = end;
644 		} else {
645 			domain = 0;
646 			next = end;
647 		}
648 		rv = kmem_back_domain(domain, object, addr, next - addr, flags);
649 		if (rv != KERN_SUCCESS) {
650 			kmem_unback(object, start, addr - start);
651 			break;
652 		}
653 	}
654 	return (rv);
655 }
656 
657 /*
658  *	kmem_unback:
659  *
660  *	Unmap and free the physical pages underlying the specified virtual
661  *	address range.
662  *
663  *	A physical page must exist within the specified object at each index
664  *	that is being unmapped.
665  */
666 static struct vmem *
667 _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
668 {
669 	struct pctrie_iter pages;
670 	struct vmem *arena;
671 	vm_page_t m;
672 	vm_offset_t end, offset;
673 	int domain;
674 
675 	KASSERT(object == kernel_object,
676 	    ("kmem_unback: only supports kernel object."));
677 
678 	if (size == 0)
679 		return (NULL);
680 	pmap_remove(kernel_pmap, addr, addr + size);
681 	offset = addr - VM_MIN_KERNEL_ADDRESS;
682 	end = offset + size;
683 	vm_page_iter_init(&pages, object);
684 	VM_OBJECT_WLOCK(object);
685 	m = vm_radix_iter_lookup(&pages, atop(offset));
686 	domain = vm_page_domain(m);
687 	if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
688 		arena = vm_dom[domain].vmd_kernel_arena;
689 	else
690 		arena = vm_dom[domain].vmd_kernel_rwx_arena;
691 	for (; offset < end; offset += PAGE_SIZE,
692 	    m = vm_radix_iter_lookup(&pages, atop(offset))) {
693 		vm_page_xbusy_claim(m);
694 		vm_page_unwire_noq(m);
695 		vm_page_iter_free(&pages, m);
696 	}
697 	VM_OBJECT_WUNLOCK(object);
698 
699 	return (arena);
700 }
701 
702 void
703 kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
704 {
705 
706 	(void)_kmem_unback(object, addr, size);
707 }
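/*
 * Illustrative sketch of the reserve-then-back pattern (simplified;
 * memguard uses a variant of this): KVA is reserved up front and backed
 * with physical pages only when needed:
 *
 *	addr = kva_alloc(size);
 *	if (kmem_back(kernel_object, (vm_offset_t)addr, size,
 *	    M_WAITOK) != KERN_SUCCESS)
 *		...fail...
 *	...use the memory...
 *	kmem_unback(kernel_object, (vm_offset_t)addr, size);
 *	kva_free(addr, size);
 */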
708 
709 /*
710  *	kmem_free:
711  *
712  *	Free memory allocated with kmem_malloc.  The size must match the
713  *	original allocation.
714  */
715 void
716 kmem_free(void *addr, vm_size_t size)
717 {
718 	struct vmem *arena;
719 
720 	size = round_page(size);
721 	kasan_mark(addr, size, size, 0);
722 	arena = _kmem_unback(kernel_object, (uintptr_t)addr, size);
723 	if (arena != NULL)
724 		vmem_free(arena, (uintptr_t)addr, size);
725 }
726 
727 static void
728 kmap_alloc_map(vm_map_t map, vm_offset_t addr, vm_size_t size,
729     vm_prot_t prot, int flags)
730 {
731 	int error __diagused;
732 
733 	error = vm_map_insert(map, NULL, 0,
734 	    addr, addr + size, prot, prot, flags);
735 	KASSERT(error == KERN_SUCCESS,
736 	    ("%s: unexpected error %d", __func__, error));
737 }
738 
739 /*
740  *	kmap_alloc_wait:
741  *
742  *	Allocates pageable memory from a sub-map of the kernel.  If the submap
743  *	has no room, the caller sleeps waiting for more memory in the submap.
744  *	If "guard_size" is non-zero, then unmapped KVA is left at the beginning
745  *	and end of the allocated range.
746  *
747  *	This routine may block.
748  */
749 void *
750 kmap_alloc_wait(vm_map_t map, vm_size_t size, vm_size_t guard_size)
751 {
752 	vm_offset_t addr;
753 	vm_size_t total_size;
754 
755 	KASSERT(size % PAGE_SIZE == 0 && guard_size % PAGE_SIZE == 0,
756 	    ("%s: size %zu guard_size %zu", __func__, size, guard_size));
757 
758 	if (!swap_reserve(size))
759 		return (NULL);
760 
761 	total_size = size + 2 * guard_size;
762 	for (;;) {
763 		/*
764 		 * To make this work for more than one map, use the map's lock
765 		 * to lock out sleepers/wakers.
766 		 */
767 		vm_map_lock(map);
768 		addr = vm_map_findspace(map, vm_map_min(map), total_size);
769 		if (addr + total_size <= vm_map_max(map))
770 			break;
771 		/* no space now; see if we can ever get space */
772 		if (vm_map_max(map) - vm_map_min(map) < total_size) {
773 			vm_map_unlock(map);
774 			swap_release(size);
775 			return (NULL);
776 		}
777 		vm_map_modflags(map, MAP_NEEDS_WAKEUP, 0);
778 		vm_map_unlock_and_wait(map, 0);
779 	}
780 	if (guard_size != 0) {
781 		kmap_alloc_map(map, addr, guard_size,
782 		    VM_PROT_NONE, MAP_CREATE_GUARD);
783 		kmap_alloc_map(map, addr + guard_size + size, guard_size,
784 		    VM_PROT_NONE, MAP_CREATE_GUARD);
785 	}
786 	kmap_alloc_map(map, addr + guard_size, size, VM_PROT_RW,
787 	    MAP_ACC_CHARGED);
788 	vm_map_unlock(map);
789 	return ((void *)(addr + guard_size));
790 }
791 
792 /*
793  *	kmap_free_wakeup:
794  *
795  *	Returns memory to a submap of the kernel, and wakes up any processes
796  *	waiting for memory in that map.
797  */
798 void
799 kmap_free_wakeup(vm_map_t map, void *va, vm_size_t size)
800 {
801 	vm_offset_t addr;
802 
803 	addr = (vm_offset_t)va;
804 	vm_map_lock(map);
805 	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
806 	if ((map->flags & MAP_NEEDS_WAKEUP) != 0) {
807 		vm_map_modflags(map, 0, MAP_NEEDS_WAKEUP);
808 		vm_map_wakeup(map);
809 	}
810 	vm_map_unlock(map);
811 }
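/*
 * Illustrative sketch (hypothetical submap and sizes): callers pair the
 * two routines above, sleeping in kmap_alloc_wait() until another thread
 * returns space via kmap_free_wakeup():
 *
 *	buf = kmap_alloc_wait(submap, bufsize, 0);
 *	if (buf == NULL)
 *		return (ENOMEM);	(swap reservation failed)
 *	...use buf; its pages are pageable and fault in on demand...
 *	kmap_free_wakeup(submap, buf, bufsize);
 */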
812 
813 void
814 kmem_init_zero_region(void)
815 {
816 	char *addr;
817 	vm_offset_t i;
818 	vm_page_t m;
819 
820 	/*
821 	 * Map a single physical page of zeros to a larger virtual range.
822 	 * This requires less looping in places that want large amounts of
823 	 * zeros, while not using much more physical resources.
824 	 */
825 	addr = kva_alloc(ZERO_REGION_SIZE);
826 	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
827 	    VM_ALLOC_NOFREE);
828 	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
829 		pmap_qenter(addr + i, &m, 1);
830 	pmap_protect(kernel_pmap, (vm_offset_t)addr,
831 	    (vm_offset_t)addr + ZERO_REGION_SIZE, VM_PROT_READ);
832 
833 	zero_region = (const void *)addr;
834 }
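/*
 * Illustrative sketch: consumers of zero_region read zeros in
 * ZERO_REGION_SIZE chunks rather than maintaining their own zeroed
 * buffers, e.g. when copying a run of zeros out to userspace:
 *
 *	while (resid > 0) {
 *		n = MIN(resid, ZERO_REGION_SIZE);
 *		error = copyout(zero_region, uaddr, n);
 *		if (error != 0)
 *			break;
 *		uaddr += n;
 *		resid -= n;
 *	}
 */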
835 
836 /*
837  * Import KVA from the kernel map into the kernel arena.
838  */
839 static int
840 kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
841 {
842 	vm_offset_t addr;
843 	int result;
844 
845 	TSENTER();
846 	KASSERT((size % KVA_QUANTUM) == 0,
847 	    ("kva_import: Size %jd is not a multiple of %d",
848 	    (intmax_t)size, (int)KVA_QUANTUM));
849 	addr = vm_map_min(kernel_map);
850 	result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
851 	    VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
852 	if (result != KERN_SUCCESS) {
853 		TSEXIT();
854 		return (ENOMEM);
855 	}
856 
857 	*addrp = addr;
858 
859 	TSEXIT();
860 	return (0);
861 }
862 
863 /*
864  * Import KVA from a parent arena into a per-domain arena.  Imports must be
865  * KVA_QUANTUM-aligned and a multiple of KVA_QUANTUM in size.
866  */
867 static int
868 kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
869 {
870 
871 	KASSERT((size % KVA_QUANTUM) == 0,
872 	    ("kva_import_domain: Size %jd is not a multiple of %d",
873 	    (intmax_t)size, (int)KVA_QUANTUM));
874 	return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
875 	    VMEM_ADDR_MAX, flags, addrp));
876 }
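/*
 * Taken together, the import chain constructed in kmem_init() below is:
 *
 *	kernel_map
 *	  -> kernel_arena			(kva_import)
 *	       -> vmd_kernel_arena		(kva_import_domain, per domain)
 *	       -> vmd_kernel_rwx_arena		(kva_import_domain, per domain)
 *	       -> vmd_kernel_nofree_arena	(kva_import_domain, per domain)
 *
 * Each child imports KVA_QUANTUM-aligned chunks from its parent on demand.
 */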
877 
878 /*
879  * 	kmem_init:
880  *
881  *	Create the kernel map; insert a mapping covering kernel text,
882  *	data, bss, and all space allocated thus far (`bootstrap' data).  The
883  *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
884  *	`start' as allocated, and the range between `start' and `end' as free.
885  *	Create the kernel vmem arena and its per-domain children.
886  */
887 void
888 kmem_init(vm_offset_t start, vm_offset_t end)
889 {
890 	vm_size_t quantum;
891 	int domain;
892 
893 	vm_map_init_system(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
894 	vm_map_lock(kernel_map);
895 	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
896 	(void)vm_map_insert(kernel_map, NULL, 0,
897 #ifdef __amd64__
898 	    KERNBASE,
899 #else
900 	    VM_MIN_KERNEL_ADDRESS,
901 #endif
902 	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
903 	/* ... and ending with the completion of the above `insert' */
904 
905 #ifdef __amd64__
906 	/*
907 	 * Mark KVA used for the page array as allocated.  Other platforms
908 	 * that handle vm_page_array allocation can simply adjust virtual_avail
909 	 * instead.
910 	 */
911 	(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
912 	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
913 	    sizeof(struct vm_page)),
914 	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
915 #endif
916 	vm_map_unlock(kernel_map);
917 
918 	/*
919 	 * Use a large import quantum on NUMA systems.  This helps minimize
920 	 * interleaving of superpages, reducing internal fragmentation within
921 	 * the per-domain arenas.
922 	 */
923 	if (vm_ndomains > 1 && PMAP_HAS_DMAP)
924 		quantum = KVA_NUMA_IMPORT_QUANTUM;
925 	else
926 		quantum = KVA_QUANTUM;
927 
928 	/*
929 	 * Initialize the kernel_arena.  This can grow on demand.
930 	 */
931 	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
932 	vmem_set_import(kernel_arena, kva_import, NULL, NULL, quantum);
933 
934 	for (domain = 0; domain < vm_ndomains; domain++) {
935 		/*
936 		 * Initialize the per-domain arenas.  These are used to color
937 		 * the KVA space in a way that ensures that virtual large pages
938 		 * are backed by memory from the same physical domain,
939 		 * maximizing the potential for superpage promotion.
940 		 */
941 		vm_dom[domain].vmd_kernel_arena = vmem_create(
942 		    "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
943 		vmem_set_import(vm_dom[domain].vmd_kernel_arena,
944 		    kva_import_domain, NULL, kernel_arena, quantum);
945 
946 		/*
947 		 * In architectures with superpages, maintain separate arenas
948 		 * for allocations with permissions that differ from the
949 		 * "standard" read/write permissions used for kernel memory
950 		 * and pages that are never released, so as not to inhibit
951 		 * superpage promotion.
952 		 *
953 		 * Use the base import quantum since these arenas are rarely
954 		 * used.
955 		 */
956 #if VM_NRESERVLEVEL > 0
957 		vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
958 		    "kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
959 		vm_dom[domain].vmd_kernel_nofree_arena = vmem_create(
960 		    "kernel NOFREE arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
961 		vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
962 		    kva_import_domain, (vmem_release_t *)vmem_xfree,
963 		    kernel_arena, KVA_QUANTUM);
964 		vmem_set_import(vm_dom[domain].vmd_kernel_nofree_arena,
965 		    kva_import_domain, (vmem_release_t *)vmem_xfree,
966 		    kernel_arena, KVA_QUANTUM);
967 #else
968 		vm_dom[domain].vmd_kernel_rwx_arena =
969 		    vm_dom[domain].vmd_kernel_arena;
970 		vm_dom[domain].vmd_kernel_nofree_arena =
971 		    vm_dom[domain].vmd_kernel_arena;
972 #endif
973 	}
974 
975 	/*
976 	 * This must be the very first call so that the virtual address
977 	 * space used for early allocations is properly marked used in
978 	 * the map.
979 	 */
980 	uma_startup2();
981 }
982 
983 /*
984  *	kmem_bootstrap_free:
985  *
986  *	Free pages backing preloaded data (e.g., kernel modules) to the
987  *	system.  Currently only supported on platforms that create a
988  *	vm_phys segment for preloaded data.
989  */
990 void
991 kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
992 {
993 #if defined(__i386__) || defined(__amd64__)
994 	struct vm_domain *vmd;
995 	vm_offset_t end, va;
996 	vm_paddr_t pa;
997 	vm_page_t m;
998 
999 	end = trunc_page(start + size);
1000 	start = round_page(start);
1001 
1002 	for (va = start; va < end; va += PAGE_SIZE) {
1003 		pa = pmap_kextract(va);
1004 		m = PHYS_TO_VM_PAGE(pa);
1005 
1006 		vmd = vm_pagequeue_domain(m);
1007 		vm_domain_free_lock(vmd);
1008 		vm_phys_free_pages(m, m->pool, 0);
1009 		vm_domain_free_unlock(vmd);
1010 
1011 		vm_domain_freecnt_inc(vmd, 1);
1012 		vm_cnt.v_page_count++;
1013 	}
1014 	pmap_remove(kernel_pmap, start, end);
1015 	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
1016 #endif
1017 }
1018 
1019 #ifdef PMAP_WANT_ACTIVE_CPUS_NAIVE
1020 void
1021 pmap_active_cpus(pmap_t pmap, cpuset_t *res)
1022 {
1023 	struct thread *td;
1024 	struct proc *p;
1025 	struct vmspace *vm;
1026 	int c;
1027 
1028 	CPU_ZERO(res);
1029 	CPU_FOREACH(c) {
1030 		td = cpuid_to_pcpu[c]->pc_curthread;
1031 		p = td->td_proc;
1032 		if (p == NULL)
1033 			continue;
1034 		vm = vmspace_acquire_ref(p);
1035 		if (vm == NULL)
1036 			continue;
1037 		if (pmap == vmspace_pmap(vm))
1038 			CPU_SET(c, res);
1039 		vmspace_free(vm);
1040 	}
1041 }
1042 #endif
1043 
1044 /*
1045  * Allow userspace to directly trigger the VM drain routine for testing
1046  * purposes.
1047  */
1048 static int
1049 debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
1050 {
1051 	int error, i;
1052 
1053 	i = 0;
1054 	error = sysctl_handle_int(oidp, &i, 0, req);
1055 	if (error != 0)
1056 		return (error);
1057 	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
1058 		return (EINVAL);
1059 	if (i != 0)
1060 		EVENTHANDLER_INVOKE(vm_lowmem, i);
1061 	return (0);
1062 }
1063 SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem,
1064     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_vm_lowmem, "I",
1065     "set to trigger vm_lowmem event with given flags");
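/*
 * Illustrative usage, assuming the conventional flag values
 * VM_LOW_KMEM == 0x01 and VM_LOW_PAGES == 0x02 from <sys/eventhandler.h>:
 *
 *	# sysctl debug.vm_lowmem=1	(simulate a kernel memory shortage)
 *	# sysctl debug.vm_lowmem=2	(simulate a shortage of free pages)
 */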
1066 
1067 static int
1068 debug_uma_reclaim(SYSCTL_HANDLER_ARGS)
1069 {
1070 	int error, i;
1071 
1072 	i = 0;
1073 	error = sysctl_handle_int(oidp, &i, 0, req);
1074 	if (error != 0 || req->newptr == NULL)
1075 		return (error);
1076 	if (i != UMA_RECLAIM_TRIM && i != UMA_RECLAIM_DRAIN &&
1077 	    i != UMA_RECLAIM_DRAIN_CPU)
1078 		return (EINVAL);
1079 	uma_reclaim(i);
1080 	return (0);
1081 }
1082 SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim,
1083     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_uma_reclaim, "I",
1084     "set to generate request to reclaim uma caches");
1085 
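/*
 * As with debug.uma_reclaim above, but targeting a single memory domain.
 * The written value packs the domain number into the upper bits and one
 * of the UMA_RECLAIM_* request types into the low nibble; e.g., to apply
 * request `req' to domain `d' from userspace:
 *
 *	# sysctl debug.uma_reclaim_domain=$(( (d << 4) | req ))
 */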
1086 static int
1087 debug_uma_reclaim_domain(SYSCTL_HANDLER_ARGS)
1088 {
1089 	int domain, error, request;
1090 
1091 	request = 0;
1092 	error = sysctl_handle_int(oidp, &request, 0, req);
1093 	if (error != 0 || req->newptr == NULL)
1094 		return (error);
1095 
1096 	domain = request >> 4;
1097 	request &= 0xf;
1098 	if (request != UMA_RECLAIM_TRIM && request != UMA_RECLAIM_DRAIN &&
1099 	    request != UMA_RECLAIM_DRAIN_CPU)
1100 		return (EINVAL);
1101 	if (domain < 0 || domain >= vm_ndomains)
1102 		return (EINVAL);
1103 	uma_reclaim_domain(request, domain);
1104 	return (0);
1105 }
1106 SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim_domain,
1107     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0,
1108     debug_uma_reclaim_domain, "I",
1109     "set to generate request to reclaim uma caches in a domain");
1110