xref: /freebsd/sys/kern/kern_malloc.c (revision b3e7694832e81d7a904a10f525f8797b753bf0d3)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1987, 1991, 1993
5  *	The Regents of the University of California.
6  * Copyright (c) 2005-2009 Robert N. M. Watson
7  * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
35  */
36 
37 /*
38  * Kernel malloc(9) implementation -- general purpose kernel memory allocator
39  * based on memory types.  Back end is implemented using the UMA(9) zone
40  * allocator.  A set of fixed-size buckets are used for smaller allocations,
41  * and a special UMA allocation interface is used for larger allocations.
42  * Callers declare memory types, and statistics are maintained independently
43  * for each memory type.  Statistics are maintained per-CPU for performance
44  * reasons.  See malloc(9) and comments in malloc.h for a detailed
45  * description.
46  */
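
/*
 * Example usage (illustrative; M_FOODATA and struct foo are hypothetical):
 * a malloc(9) consumer declares its own memory type, allocates against it,
 * and frees with the same type so that the per-type statistics balance:
 *
 *	MALLOC_DEFINE(M_FOODATA, "foodata", "example foo buffers");
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_FOODATA, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_FOODATA);
 */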
47 
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50 
51 #include "opt_ddb.h"
52 #include "opt_vm.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/asan.h>
57 #include <sys/kdb.h>
58 #include <sys/kernel.h>
59 #include <sys/lock.h>
60 #include <sys/malloc.h>
61 #include <sys/msan.h>
62 #include <sys/mutex.h>
63 #include <sys/vmmeter.h>
64 #include <sys/proc.h>
65 #include <sys/queue.h>
66 #include <sys/sbuf.h>
67 #include <sys/smp.h>
68 #include <sys/sysctl.h>
69 #include <sys/time.h>
70 #include <sys/vmem.h>
71 #ifdef EPOCH_TRACE
72 #include <sys/epoch.h>
73 #endif
74 
75 #include <vm/vm.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_domainset.h>
78 #include <vm/vm_pageout.h>
79 #include <vm/vm_param.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_extern.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_phys.h>
85 #include <vm/vm_pagequeue.h>
86 #include <vm/uma.h>
87 #include <vm/uma_int.h>
88 #include <vm/uma_dbg.h>
89 
90 #ifdef DEBUG_MEMGUARD
91 #include <vm/memguard.h>
92 #endif
93 #ifdef DEBUG_REDZONE
94 #include <vm/redzone.h>
95 #endif
96 
97 #if defined(INVARIANTS) && defined(__i386__)
98 #include <machine/cpu.h>
99 #endif
100 
101 #include <ddb/ddb.h>
102 
103 #ifdef KDTRACE_HOOKS
104 #include <sys/dtrace_bsd.h>
105 
106 bool	__read_frequently			dtrace_malloc_enabled;
107 dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
108 #endif
109 
110 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
111     defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
112 #define	MALLOC_DEBUG	1
113 #endif
114 
115 #if defined(KASAN) || defined(DEBUG_REDZONE)
116 #define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
117 #define	DEBUG_REDZONE_ARG	, osize
118 #else
119 #define	DEBUG_REDZONE_ARG_DEF
120 #define	DEBUG_REDZONE_ARG
121 #endif
122 
123 /*
124  * When realloc() is called, if the new size is sufficiently smaller than
125  * the old size, realloc() will allocate a new, smaller block to avoid
126  * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
127  * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
128  */
129 #ifndef REALLOC_FRACTION
130 #define	REALLOC_FRACTION	1	/* new block if <= half the size */
131 #endif
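
/*
 * Worked example with the default REALLOC_FRACTION of 1: shrinking a
 * 1024-byte allocation to 600 bytes reuses the existing block because
 * 600 > 1024 / 2, while shrinking it to 512 bytes or fewer allocates a
 * new, smaller block and copies the old contents into it.
 */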
132 
133 /*
134  * Centrally define some common malloc types.
135  */
136 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
137 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
138 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
139 
140 static struct malloc_type *kmemstatistics;
141 static int kmemcount;
142 
143 #define KMEM_ZSHIFT	4
144 #define KMEM_ZBASE	16
145 #define KMEM_ZMASK	(KMEM_ZBASE - 1)
146 
147 #define KMEM_ZMAX	65536
148 #define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
149 static uint8_t kmemsize[KMEM_ZSIZE + 1];
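
/*
 * Worked example of the size-to-zone lookup (see malloc() and mallocinit()
 * below): a 100-byte request is first rounded up to the next multiple of
 * KMEM_ZBASE, (100 & ~KMEM_ZMASK) + KMEM_ZBASE = 112, and then
 * kmemsize[112 >> KMEM_ZSHIFT] gives the index of the smallest bucket that
 * fits, the 128-byte "malloc-128" zone.
 */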
150 
151 #ifndef MALLOC_DEBUG_MAXZONES
152 #define	MALLOC_DEBUG_MAXZONES	1
153 #endif
154 static int numzones = MALLOC_DEBUG_MAXZONES;
155 
156 /*
157  * Small malloc(9) memory allocations are served from a set of UMA buckets
158  * of various sizes.
159  *
160  * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
161  *
162  * XXX: The comment here used to read "These won't be powers of two for
163  * long."  It's possible that a significant amount of wasted memory could be
164  * recovered by tuning the sizes of these buckets.
165  */
166 struct {
167 	int kz_size;
168 	const char *kz_name;
169 	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
170 } kmemzones[] = {
171 	{16, "malloc-16", },
172 	{32, "malloc-32", },
173 	{64, "malloc-64", },
174 	{128, "malloc-128", },
175 	{256, "malloc-256", },
176 	{384, "malloc-384", },
177 	{512, "malloc-512", },
178 	{1024, "malloc-1024", },
179 	{2048, "malloc-2048", },
180 	{4096, "malloc-4096", },
181 	{8192, "malloc-8192", },
182 	{16384, "malloc-16384", },
183 	{32768, "malloc-32768", },
184 	{65536, "malloc-65536", },
185 	{0, NULL},
186 };
187 
188 u_long vm_kmem_size;
189 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
190     "Size of kernel memory");
191 
192 static u_long kmem_zmax = KMEM_ZMAX;
193 SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
194     "Maximum allocation size for which malloc(9) uses UMA as the backend");
195 
196 static u_long vm_kmem_size_min;
197 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
198     "Minimum size of kernel memory");
199 
200 static u_long vm_kmem_size_max;
201 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
202     "Maximum size of kernel memory");
203 
204 static u_int vm_kmem_size_scale;
205 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
206     "Scale factor for kernel memory size");
207 
208 static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
209 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
210     CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
211     sysctl_kmem_map_size, "LU", "Current kmem allocation size");
212 
213 static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
214 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
215     CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
216     sysctl_kmem_map_free, "LU", "Free space in kmem");
217 
218 static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
219     "Malloc information");
220 
221 static u_int vm_malloc_zone_count = nitems(kmemzones);
222 SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
223     CTLFLAG_RD, &vm_malloc_zone_count, 0,
224     "Number of malloc zones");
225 
226 static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
227 SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
228     CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
229     sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");
230 
231 /*
232  * The malloc_mtx protects the kmemstatistics linked list.
233  */
234 struct mtx malloc_mtx;
235 
236 static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
237 
238 #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
239 static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
240     "Kernel malloc debugging options");
241 #endif
242 
243 /*
244  * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
245  * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
246  */
247 #ifdef MALLOC_MAKE_FAILURES
248 static int malloc_failure_rate;
249 static int malloc_nowait_count;
250 static int malloc_failure_count;
251 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
252     &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
253 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
254     &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
255 #endif
256 
257 static int
258 sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
259 {
260 	u_long size;
261 
262 	size = uma_size();
263 	return (sysctl_handle_long(oidp, &size, 0, req));
264 }
265 
266 static int
267 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
268 {
269 	u_long size, limit;
270 
271 	/* The sysctl is unsigned, implement as a saturation value. */
272 	size = uma_size();
273 	limit = uma_limit();
274 	if (size > limit)
275 		size = 0;
276 	else
277 		size = limit - size;
278 	return (sysctl_handle_long(oidp, &size, 0, req));
279 }
280 
281 static int
282 sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
283 {
284 	int sizes[nitems(kmemzones)];
285 	int i;
286 
287 	for (i = 0; i < nitems(kmemzones); i++) {
288 		sizes[i] = kmemzones[i].kz_size;
289 	}
290 
291 	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
292 }
293 
294 /*
295  * malloc(9) uma zone separation -- sub-page buffer overruns in one
296  * malloc type will affect only a subset of other malloc types.
297  */
298 #if MALLOC_DEBUG_MAXZONES > 1
299 static void
300 tunable_set_numzones(void)
301 {
302 
303 	TUNABLE_INT_FETCH("debug.malloc.numzones",
304 	    &numzones);
305 
306 	/* Sanity check the number of malloc uma zones. */
307 	if (numzones <= 0)
308 		numzones = 1;
309 	if (numzones > MALLOC_DEBUG_MAXZONES)
310 		numzones = MALLOC_DEBUG_MAXZONES;
311 }
312 SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
313 SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
314     &numzones, 0, "Number of malloc uma subzones");
315 
316 /*
317  * Any number that changes regularly is an okay choice for the
318  * offset.  Build numbers are pretty good if you have them.
319  */
320 static u_int zone_offset = __FreeBSD_version;
321 TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
322 SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
323     &zone_offset, 0, "Separate malloc types by examining the "
324     "Nth character in the malloc type short description.");
325 
326 static void
327 mtp_set_subzone(struct malloc_type *mtp)
328 {
329 	struct malloc_type_internal *mtip;
330 	const char *desc;
331 	size_t len;
332 	u_int val;
333 
334 	mtip = &mtp->ks_mti;
335 	desc = mtp->ks_shortdesc;
336 	if (desc == NULL || (len = strlen(desc)) == 0)
337 		val = 0;
338 	else
339 		val = desc[zone_offset % len];
340 	mtip->mti_zone = (val % numzones);
341 }
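
/*
 * Example of the selection above: with a zone_offset of, say, 1400097 and
 * a malloc type whose short description is "devbuf" (length 6), the
 * examined character is desc[1400097 % 6] = desc[3] = 'b' (0x62), so the
 * type is assigned subzone 0x62 % numzones.  Types assigned different
 * subzones are backed by different UMA zones, so a sub-page overrun in one
 * cannot corrupt items of the other.
 */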
342 
343 static inline u_int
344 mtp_get_subzone(struct malloc_type *mtp)
345 {
346 	struct malloc_type_internal *mtip;
347 
348 	mtip = &mtp->ks_mti;
349 
350 	KASSERT(mtip->mti_zone < numzones,
351 	    ("mti_zone %u out of range %d",
352 	    mtip->mti_zone, numzones));
353 	return (mtip->mti_zone);
354 }
355 #elif MALLOC_DEBUG_MAXZONES == 0
356 #error "MALLOC_DEBUG_MAXZONES must be positive."
357 #else
358 static void
359 mtp_set_subzone(struct malloc_type *mtp)
360 {
361 	struct malloc_type_internal *mtip;
362 
363 	mtip = &mtp->ks_mti;
364 	mtip->mti_zone = 0;
365 }
366 
367 static inline u_int
368 mtp_get_subzone(struct malloc_type *mtp)
369 {
370 
371 	return (0);
372 }
373 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
374 
375 /*
376  * An allocation has succeeded -- update malloc type statistics for the
377  * amount of the bucket size.  Occurs within a critical section so that the
378  * thread isn't preempted and doesn't migrate while updating per-CPU
379  * statistics.
380  */
381 static void
382 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
383     int zindx)
384 {
385 	struct malloc_type_internal *mtip;
386 	struct malloc_type_stats *mtsp;
387 
388 	critical_enter();
389 	mtip = &mtp->ks_mti;
390 	mtsp = zpcpu_get(mtip->mti_stats);
391 	if (size > 0) {
392 		mtsp->mts_memalloced += size;
393 		mtsp->mts_numallocs++;
394 	}
395 	if (zindx != -1)
396 		mtsp->mts_size |= 1 << zindx;
397 
398 #ifdef KDTRACE_HOOKS
399 	if (__predict_false(dtrace_malloc_enabled)) {
400 		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
401 		if (probe_id != 0)
402 			(dtrace_malloc_probe)(probe_id,
403 			    (uintptr_t) mtp, (uintptr_t) mtip,
404 			    (uintptr_t) mtsp, size, zindx);
405 	}
406 #endif
407 
408 	critical_exit();
409 }
410 
411 void
412 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
413 {
414 
415 	if (size > 0)
416 		malloc_type_zone_allocated(mtp, size, -1);
417 }
418 
419 /*
420  * A free operation has occurred -- update malloc type statistics for the
421  * amount of the bucket size.  Occurs within a critical section so that the
422  * thread isn't preempted and doesn't migrate while updating per-CPU
423  * statistics.
424  */
425 void
426 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
427 {
428 	struct malloc_type_internal *mtip;
429 	struct malloc_type_stats *mtsp;
430 
431 	critical_enter();
432 	mtip = &mtp->ks_mti;
433 	mtsp = zpcpu_get(mtip->mti_stats);
434 	mtsp->mts_memfreed += size;
435 	mtsp->mts_numfrees++;
436 
437 #ifdef KDTRACE_HOOKS
438 	if (__predict_false(dtrace_malloc_enabled)) {
439 		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
440 		if (probe_id != 0)
441 			(dtrace_malloc_probe)(probe_id,
442 			    (uintptr_t) mtp, (uintptr_t) mtip,
443 			    (uintptr_t) mtsp, size, 0);
444 	}
445 #endif
446 
447 	critical_exit();
448 }
449 
450 /*
451  *	contigmalloc:
452  *
453  *	Allocate a block of physically contiguous memory.
454  *
455  *	If M_NOWAIT is set, this routine will not block and will return NULL
456  *	if the allocation fails.
457  */
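/*
 *	Example (illustrative; the size and address constraints are
 *	hypothetical): a driver needing a 64KB buffer that is physically
 *	contiguous, below 4GB, and page aligned could use:
 *
 *		buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *		    PAGE_SIZE, 0);
 *		...
 *		contigfree(buf, 65536, M_DEVBUF);
 */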
458 void *
459 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
460     vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
461     vm_paddr_t boundary)
462 {
463 	void *ret;
464 
465 	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
466 	    boundary, VM_MEMATTR_DEFAULT);
467 	if (ret != NULL)
468 		malloc_type_allocated(type, round_page(size));
469 	return (ret);
470 }
471 
472 void *
473 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
474     struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
475     unsigned long alignment, vm_paddr_t boundary)
476 {
477 	void *ret;
478 
479 	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
480 	    alignment, boundary, VM_MEMATTR_DEFAULT);
481 	if (ret != NULL)
482 		malloc_type_allocated(type, round_page(size));
483 	return (ret);
484 }
485 
486 /*
487  *	contigfree:
488  *
489  *	Free a block of memory allocated by contigmalloc.
490  *
491  *	This routine may not block.
492  */
493 void
494 contigfree(void *addr, unsigned long size, struct malloc_type *type)
495 {
496 
497 	kmem_free(addr, size);
498 	malloc_type_freed(type, round_page(size));
499 }
500 
501 #ifdef MALLOC_DEBUG
502 static int
503 malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
504     int flags)
505 {
506 #ifdef INVARIANTS
507 	int indx;
508 
509 	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
510 	/*
511 	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
512 	 */
513 	indx = flags & (M_WAITOK | M_NOWAIT);
514 	if (indx != M_NOWAIT && indx != M_WAITOK) {
515 		static	struct timeval lasterr;
516 		static	int curerr, once;
517 		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
518 			printf("Bad malloc flags: %x\n", indx);
519 			kdb_backtrace();
520 			flags |= M_WAITOK;
521 			once++;
522 		}
523 	}
524 #endif
525 #ifdef MALLOC_MAKE_FAILURES
526 	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
527 		atomic_add_int(&malloc_nowait_count, 1);
528 		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
529 			atomic_add_int(&malloc_failure_count, 1);
530 			*vap = NULL;
531 			return (EJUSTRETURN);
532 		}
533 	}
534 #endif
535 	if (flags & M_WAITOK) {
536 		KASSERT(curthread->td_intr_nesting_level == 0,
537 		   ("malloc(M_WAITOK) in interrupt context"));
538 		if (__predict_false(!THREAD_CAN_SLEEP())) {
539 #ifdef EPOCH_TRACE
540 			epoch_trace_list(curthread);
541 #endif
542 			KASSERT(0,
543 			    ("malloc(M_WAITOK) with sleeping prohibited"));
544 		}
545 	}
546 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
547 	    ("malloc: called with spinlock or critical section held"));
548 
549 #ifdef DEBUG_MEMGUARD
550 	if (memguard_cmp_mtp(mtp, *sizep)) {
551 		*vap = memguard_alloc(*sizep, flags);
552 		if (*vap != NULL)
553 			return (EJUSTRETURN);
554 		/* This is unfortunate but should not be fatal. */
555 	}
556 #endif
557 
558 #ifdef DEBUG_REDZONE
559 	*sizep = redzone_size_ntor(*sizep);
560 #endif
561 
562 	return (0);
563 }
564 #endif
565 
566 /*
567  * Handle large allocations and frees by using kmem_malloc directly.
568  */
569 static inline bool
570 malloc_large_slab(uma_slab_t slab)
571 {
572 	uintptr_t va;
573 
574 	va = (uintptr_t)slab;
575 	return ((va & 1) != 0);
576 }
577 
578 static inline size_t
579 malloc_large_size(uma_slab_t slab)
580 {
581 	uintptr_t va;
582 
583 	va = (uintptr_t)slab;
584 	return (va >> 1);
585 }
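
/*
 * For example, a 128KB allocation handled by malloc_large() stores
 * ((131072 << 1) | 1) = 0x40001 in the slab pointer slot: the set low bit
 * marks the allocation as large, and shifting right by one in
 * malloc_large_size() recovers the 131072-byte size.
 */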
586 
587 static caddr_t __noinline
588 malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
589     int flags DEBUG_REDZONE_ARG_DEF)
590 {
591 	void *va;
592 
593 	size = roundup(size, PAGE_SIZE);
594 	va = kmem_malloc_domainset(policy, size, flags);
595 	if (va != NULL) {
596 		/* The low bit is unused for slab pointers. */
597 		vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
598 		uma_total_inc(size);
599 	}
600 	malloc_type_allocated(mtp, va == NULL ? 0 : size);
601 	if (__predict_false(va == NULL)) {
602 		KASSERT((flags & M_WAITOK) == 0,
603 		    ("malloc(M_WAITOK) returned NULL"));
604 	} else {
605 #ifdef DEBUG_REDZONE
606 		va = redzone_setup(va, osize);
607 #endif
608 		kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
609 	}
610 	return (va);
611 }
612 
613 static void
614 free_large(void *addr, size_t size)
615 {
616 
617 	kmem_free(addr, size);
618 	uma_total_dec(size);
619 }
620 
621 /*
622  *	malloc:
623  *
624  *	Allocate a block of memory.
625  *
626  *	If M_NOWAIT is set, this routine will not block and will return NULL
627  *	if the allocation fails.
628  */
629 void *
630 (malloc)(size_t size, struct malloc_type *mtp, int flags)
631 {
632 	int indx;
633 	caddr_t va;
634 	uma_zone_t zone;
635 #if defined(DEBUG_REDZONE) || defined(KASAN)
636 	unsigned long osize = size;
637 #endif
638 
639 	MPASS((flags & M_EXEC) == 0);
640 
641 #ifdef MALLOC_DEBUG
642 	va = NULL;
643 	if (malloc_dbg(&va, &size, mtp, flags) != 0)
644 		return (va);
645 #endif
646 
647 	if (__predict_false(size > kmem_zmax))
648 		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
649 		    DEBUG_REDZONE_ARG));
650 
651 	if (size & KMEM_ZMASK)
652 		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
653 	indx = kmemsize[size >> KMEM_ZSHIFT];
654 	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
655 	va = uma_zalloc(zone, flags);
656 	if (va != NULL) {
657 		size = zone->uz_size;
658 		if ((flags & M_ZERO) == 0) {
659 			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
660 			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
661 		}
662 	}
663 	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
664 	if (__predict_false(va == NULL)) {
665 		KASSERT((flags & M_WAITOK) == 0,
666 		    ("malloc(M_WAITOK) returned NULL"));
667 	}
668 #ifdef DEBUG_REDZONE
669 	if (va != NULL)
670 		va = redzone_setup(va, osize);
671 #endif
672 #ifdef KASAN
673 	if (va != NULL)
674 		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
675 #endif
676 	return ((void *) va);
677 }
678 
679 static void *
680 malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
681     int flags)
682 {
683 	uma_zone_t zone;
684 	caddr_t va;
685 	size_t size;
686 	int indx;
687 
688 	size = *sizep;
689 	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
690 	    ("malloc_domain: Called with bad flag / size combination."));
691 	if (size & KMEM_ZMASK)
692 		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
693 	indx = kmemsize[size >> KMEM_ZSHIFT];
694 	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
695 	va = uma_zalloc_domain(zone, NULL, domain, flags);
696 	if (va != NULL)
697 		*sizep = zone->uz_size;
698 	*indxp = indx;
699 	return ((void *)va);
700 }
701 
702 void *
703 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
704     int flags)
705 {
706 	struct vm_domainset_iter di;
707 	caddr_t va;
708 	int domain;
709 	int indx;
710 #if defined(KASAN) || defined(DEBUG_REDZONE)
711 	unsigned long osize = size;
712 #endif
713 
714 	MPASS((flags & M_EXEC) == 0);
715 
716 #ifdef MALLOC_DEBUG
717 	va = NULL;
718 	if (malloc_dbg(&va, &size, mtp, flags) != 0)
719 		return (va);
720 #endif
721 
722 	if (__predict_false(size > kmem_zmax))
723 		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
724 		    DEBUG_REDZONE_ARG));
725 
726 	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
727 	do {
728 		va = malloc_domain(&size, &indx, mtp, domain, flags);
729 	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
730 	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
731 	if (__predict_false(va == NULL)) {
732 		KASSERT((flags & M_WAITOK) == 0,
733 		    ("malloc(M_WAITOK) returned NULL"));
734 	}
735 #ifdef DEBUG_REDZONE
736 	if (va != NULL)
737 		va = redzone_setup(va, osize);
738 #endif
739 #ifdef KASAN
740 	if (va != NULL)
741 		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
742 #endif
743 #ifdef KMSAN
744 	if ((flags & M_ZERO) == 0) {
745 		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
746 		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
747 	}
748 #endif
749 	return (va);
750 }
751 
752 /*
753  * Allocate an executable area.
754  */
755 void *
756 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
757 {
758 
759 	return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
760 }
761 
762 void *
763 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
764     int flags)
765 {
766 #if defined(DEBUG_REDZONE) || defined(KASAN)
767 	unsigned long osize = size;
768 #endif
769 #ifdef MALLOC_DEBUG
770 	caddr_t va;
771 #endif
772 
773 	flags |= M_EXEC;
774 
775 #ifdef MALLOC_DEBUG
776 	va = NULL;
777 	if (malloc_dbg(&va, &size, mtp, flags) != 0)
778 		return (va);
779 #endif
780 
781 	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
782 }
783 
784 void *
785 malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
786 {
787 	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
788 	    flags));
789 }
790 
791 void *
792 malloc_domainset_aligned(size_t size, size_t align,
793     struct malloc_type *mtp, struct domainset *ds, int flags)
794 {
795 	void *res;
796 	size_t asize;
797 
798 	KASSERT(powerof2(align),
799 	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
800 	    align, size));
801 	KASSERT(align <= PAGE_SIZE,
802 	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
803 	    align, size));
804 
805 	/*
806 	 * Round the allocation size up to the next power of 2,
807 	 * because we can only guarantee alignment for
808 	 * power-of-2-sized allocations.  Further increase the
809 	 * allocation size to align if the rounded size is less than
810 	 * align, since malloc zones provide alignment equal to their
811 	 * size.
812 	 */
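	/*
	 * For example, a request for 300 bytes with 64-byte alignment
	 * computes asize = 1UL << flsl(299) = 512, while a request for
	 * 40 bytes with the same alignment uses asize = align = 64.
	 */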
813 	if (size == 0)
814 		size = 1;
815 	asize = size <= align ? align : 1UL << flsl(size - 1);
816 
817 	res = malloc_domainset(asize, mtp, ds, flags);
818 	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
819 	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
820 	    "allocsize %#zx align %#zx", res, size, asize, align));
821 	return (res);
822 }
823 
824 void *
825 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
826 {
827 
828 	if (WOULD_OVERFLOW(nmemb, size))
829 		panic("mallocarray: %zu * %zu overflowed", nmemb, size);
830 
831 	return (malloc(size * nmemb, type, flags));
832 }
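
/*
 * Example (illustrative; struct foo is hypothetical):
 * mallocarray(n, sizeof(struct foo), M_TEMP, M_WAITOK) panics rather than
 * allocating a short buffer if n * sizeof(struct foo) would overflow a
 * size_t, which makes it the safer form when the element count comes from
 * untrusted input.
 */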
833 
834 void *
835 mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
836     struct domainset *ds, int flags)
837 {
838 
839 	if (WOULD_OVERFLOW(nmemb, size))
840 		panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);
841 
842 	return (malloc_domainset(size * nmemb, type, ds, flags));
843 }
844 
845 #if defined(INVARIANTS) && !defined(KASAN)
846 static void
847 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
848 {
849 	struct malloc_type **mtpp = addr;
850 
851 	/*
852 	 * Cache a pointer to the malloc_type that most recently freed
853 	 * this memory here.  This way we know who is most likely to
854 	 * have stepped on it later.
855 	 *
856  * This code assumes that size is a multiple of 8 bytes on
857  * 64-bit machines.
858 	 */
859 	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
860 	mtpp += (size - sizeof(struct malloc_type *)) /
861 	    sizeof(struct malloc_type *);
862 	*mtpp = mtp;
863 }
864 #endif
865 
866 #ifdef MALLOC_DEBUG
867 static int
868 free_dbg(void **addrp, struct malloc_type *mtp)
869 {
870 	void *addr;
871 
872 	addr = *addrp;
873 	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
874 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
875 	    ("free: called with spinlock or critical section held"));
876 
877 	/* free(NULL, ...) does nothing */
878 	if (addr == NULL)
879 		return (EJUSTRETURN);
880 
881 #ifdef DEBUG_MEMGUARD
882 	if (is_memguard_addr(addr)) {
883 		memguard_free(addr);
884 		return (EJUSTRETURN);
885 	}
886 #endif
887 
888 #ifdef DEBUG_REDZONE
889 	redzone_check(addr);
890 	*addrp = redzone_addr_ntor(addr);
891 #endif
892 
893 	return (0);
894 }
895 #endif
896 
897 /*
898  *	free:
899  *
900  *	Free a block of memory allocated by malloc.
901  *
902  *	This routine may not block.
903  */
904 void
905 free(void *addr, struct malloc_type *mtp)
906 {
907 	uma_zone_t zone;
908 	uma_slab_t slab;
909 	u_long size;
910 
911 #ifdef MALLOC_DEBUG
912 	if (free_dbg(&addr, mtp) != 0)
913 		return;
914 #endif
915 	/* free(NULL, ...) does nothing */
916 	if (addr == NULL)
917 		return;
918 
919 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
920 	if (slab == NULL)
921 		panic("free: address %p(%p) has not been allocated.\n",
922 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
923 
924 	if (__predict_true(!malloc_large_slab(slab))) {
925 		size = zone->uz_size;
926 #if defined(INVARIANTS) && !defined(KASAN)
927 		free_save_type(addr, mtp, size);
928 #endif
929 		uma_zfree_arg(zone, addr, slab);
930 	} else {
931 		size = malloc_large_size(slab);
932 		free_large(addr, size);
933 	}
934 	malloc_type_freed(mtp, size);
935 }
936 
937 /*
938  *	zfree:
939  *
940  *	Zero then free a block of memory allocated by malloc.
941  *
942  *	This routine may not block.
943  */
944 void
945 zfree(void *addr, struct malloc_type *mtp)
946 {
947 	uma_zone_t zone;
948 	uma_slab_t slab;
949 	u_long size;
950 
951 #ifdef MALLOC_DEBUG
952 	if (free_dbg(&addr, mtp) != 0)
953 		return;
954 #endif
955 	/* free(NULL, ...) does nothing */
956 	if (addr == NULL)
957 		return;
958 
959 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
960 	if (slab == NULL)
961 		panic("free: address %p(%p) has not been allocated.\n",
962 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
963 
964 	if (__predict_true(!malloc_large_slab(slab))) {
965 		size = zone->uz_size;
966 #if defined(INVARIANTS) && !defined(KASAN)
967 		free_save_type(addr, mtp, size);
968 #endif
969 		kasan_mark(addr, size, size, 0);
970 		explicit_bzero(addr, size);
971 		uma_zfree_arg(zone, addr, slab);
972 	} else {
973 		size = malloc_large_size(slab);
974 		kasan_mark(addr, size, size, 0);
975 		explicit_bzero(addr, size);
976 		free_large(addr, size);
977 	}
978 	malloc_type_freed(mtp, size);
979 }
980 
981 /*
982  *	realloc: change the size of a memory block
983  */
984 void *
985 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
986 {
987 #ifndef DEBUG_REDZONE
988 	uma_zone_t zone;
989 	uma_slab_t slab;
990 #endif
991 	unsigned long alloc;
992 	void *newaddr;
993 
994 	KASSERT(mtp->ks_version == M_VERSION,
995 	    ("realloc: bad malloc type version"));
996 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
997 	    ("realloc: called with spinlock or critical section held"));
998 
999 	/* realloc(NULL, ...) is equivalent to malloc(...) */
1000 	if (addr == NULL)
1001 		return (malloc(size, mtp, flags));
1002 
1003 	/*
1004 	 * XXX: Should report free of old memory and alloc of new memory to
1005 	 * per-CPU stats.
1006 	 */
1007 
1008 #ifdef DEBUG_MEMGUARD
1009 	if (is_memguard_addr(addr))
1010 		return (memguard_realloc(addr, size, mtp, flags));
1011 #endif
1012 
1013 #ifdef DEBUG_REDZONE
1014 	alloc = redzone_get_size(addr);
1015 #else
1016 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1017 
1018 	/* Sanity check */
1019 	KASSERT(slab != NULL,
1020 	    ("realloc: address %p out of range", (void *)addr));
1021 
1022 	/* Get the size of the original block */
1023 	if (!malloc_large_slab(slab))
1024 		alloc = zone->uz_size;
1025 	else
1026 		alloc = malloc_large_size(slab);
1027 
1028 	/* Reuse the original block if appropriate */
1029 	if (size <= alloc &&
1030 	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
1031 		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
1032 		return (addr);
1033 	}
1034 #endif /* !DEBUG_REDZONE */
1035 
1036 	/* Allocate a new, bigger (or smaller) block */
1037 	if ((newaddr = malloc(size, mtp, flags)) == NULL)
1038 		return (NULL);
1039 
1040 	/*
1041 	 * Copy over original contents.  For KASAN, the redzone must be marked
1042 	 * valid before performing the copy.
1043 	 */
1044 	kasan_mark(addr, alloc, alloc, 0);
1045 	bcopy(addr, newaddr, min(size, alloc));
1046 	free(addr, mtp);
1047 	return (newaddr);
1048 }
1049 
1050 /*
1051  *	reallocf: same as realloc() but free memory on failure.
1052  */
1053 void *
1054 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1055 {
1056 	void *mem;
1057 
1058 	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
1059 		free(addr, mtp);
1060 	return (mem);
1061 }
1062 
1063 /*
1064  * 	malloc_size: returns the number of bytes allocated for a request of the
1065  * 		     specified size
1066  */
1067 size_t
1068 malloc_size(size_t size)
1069 {
1070 	int indx;
1071 
1072 	if (size > kmem_zmax)
1073 		return (0);
1074 	if (size & KMEM_ZMASK)
1075 		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1076 	indx = kmemsize[size >> KMEM_ZSHIFT];
1077 	return (kmemzones[indx].kz_size);
1078 }
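
/*
 * For example, with the default kmem_zmax, malloc_size(100) reports 128,
 * the capacity of the bucket a 100-byte request would be served from,
 * while malloc_size(100000) returns 0 because such requests bypass the
 * buckets entirely.
 */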
1079 
1080 /*
1081  *	malloc_usable_size: returns the usable size of the allocation.
1082  */
1083 size_t
1084 malloc_usable_size(const void *addr)
1085 {
1086 #ifndef DEBUG_REDZONE
1087 	uma_zone_t zone;
1088 	uma_slab_t slab;
1089 #endif
1090 	u_long size;
1091 
1092 	if (addr == NULL)
1093 		return (0);
1094 
1095 #ifdef DEBUG_MEMGUARD
1096 	if (is_memguard_addr(__DECONST(void *, addr)))
1097 		return (memguard_get_req_size(addr));
1098 #endif
1099 
1100 #ifdef DEBUG_REDZONE
1101 	size = redzone_get_size(__DECONST(void *, addr));
1102 #else
1103 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1104 	if (slab == NULL)
1105 		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
1106 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
1107 
1108 	if (!malloc_large_slab(slab))
1109 		size = zone->uz_size;
1110 	else
1111 		size = malloc_large_size(slab);
1112 #endif
1113 
1114 	/*
1115 	 * Unmark the redzone to avoid reports from consumers who are
1116 	 * (presumably) about to use the full allocation size.
1117 	 */
1118 	kasan_mark(addr, size, size, 0);
1119 
1120 	return (size);
1121 }
1122 
1123 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1124 
1125 /*
1126  * Initialize the kernel memory (kmem) arena.
1127  */
1128 void
1129 kmeminit(void)
1130 {
1131 	u_long mem_size;
1132 	u_long tmp;
1133 
1134 #ifdef VM_KMEM_SIZE
1135 	if (vm_kmem_size == 0)
1136 		vm_kmem_size = VM_KMEM_SIZE;
1137 #endif
1138 #ifdef VM_KMEM_SIZE_MIN
1139 	if (vm_kmem_size_min == 0)
1140 		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1141 #endif
1142 #ifdef VM_KMEM_SIZE_MAX
1143 	if (vm_kmem_size_max == 0)
1144 		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1145 #endif
1146 	/*
1147 	 * Calculate the amount of kernel virtual address (KVA) space that is
1148 	 * preallocated to the kmem arena.  In order to support a wide range
1149 	 * of machines, it is a function of the physical memory size,
1150 	 * specifically,
1151 	 *
1152 	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1153 	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1154 	 *
1155 	 * Every architecture must define an integral value for
1156 	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
1157 	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1158 	 * ceiling on this preallocation, are optional.  Typically,
1159 	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1160 	 * a given architecture.
1161 	 */
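	/*
	 * Hypothetical example with 4KB pages: a machine with 8GB of RAM
	 * (2097152 pages) and VM_KMEM_SIZE_SCALE = 3 would default to
	 * (2097152 / 3) * PAGE_SIZE, roughly 2.67GB of KVA, before being
	 * clamped by VM_KMEM_SIZE_MIN/MAX where those are defined.
	 */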
1162 	mem_size = vm_cnt.v_page_count;
1163 	if (mem_size <= 32768) /* delphij XXX 128MB */
1164 		kmem_zmax = PAGE_SIZE;
1165 
1166 	if (vm_kmem_size_scale < 1)
1167 		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1168 
1169 	/*
1170 	 * Check if we should use defaults for the "vm_kmem_size"
1171 	 * variable:
1172 	 */
1173 	if (vm_kmem_size == 0) {
1174 		vm_kmem_size = mem_size / vm_kmem_size_scale;
1175 		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
1176 		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1177 		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
1178 			vm_kmem_size = vm_kmem_size_min;
1179 		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1180 			vm_kmem_size = vm_kmem_size_max;
1181 	}
1182 	if (vm_kmem_size == 0)
1183 		panic("Tune VM_KMEM_SIZE_* for the platform");
1184 
1185 	/*
1186 	 * The amount of KVA space that is preallocated to the
1187 	 * kmem arena can be set statically at compile-time or manually
1188 	 * through the kernel environment.  However, it is still limited to
1189 	 * twice the physical memory size, which has been sufficient to handle
1190 	 * the most severe cases of external fragmentation in the kmem arena.
1191 	 */
1192 	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1193 		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
1194 
1195 	vm_kmem_size = round_page(vm_kmem_size);
1196 
1197 	/*
1198 	 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
1199 	 * shadowed.  Account for this when setting the UMA limit.
1200 	 */
1201 #if defined(KASAN)
1202 	vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
1203 	    (KASAN_SHADOW_SCALE + 1);
1204 #elif defined(KMSAN)
1205 	vm_kmem_size /= 3;
1206 #endif
1207 
1208 #ifdef DEBUG_MEMGUARD
1209 	tmp = memguard_fudge(vm_kmem_size, kernel_map);
1210 #else
1211 	tmp = vm_kmem_size;
1212 #endif
1213 	uma_set_limit(tmp);
1214 
1215 #ifdef DEBUG_MEMGUARD
1216 	/*
1217 	 * Initialize MemGuard if support compiled in.  MemGuard is a
1218 	 * replacement allocator used for detecting tamper-after-free
1219 	 * scenarios as they occur.  It is only used for debugging.
1220 	 */
1221 	memguard_init(kernel_arena);
1222 #endif
1223 }
1224 
1225 /*
1226  * Initialize the kernel memory allocator
1227  */
1228 /* ARGSUSED*/
1229 static void
1230 mallocinit(void *dummy)
1231 {
1232 	int i;
1233 	uint8_t indx;
1234 
1235 	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
1236 
1237 	kmeminit();
1238 
1239 	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
1240 		kmem_zmax = KMEM_ZMAX;
1241 
1242 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
1243 		int size = kmemzones[indx].kz_size;
1244 		const char *name = kmemzones[indx].kz_name;
1245 		size_t align;
1246 		int subzone;
1247 
1248 		align = UMA_ALIGN_PTR;
1249 		if (powerof2(size) && size > sizeof(void *))
1250 			align = MIN(size, PAGE_SIZE) - 1;
1251 		for (subzone = 0; subzone < numzones; subzone++) {
1252 			kmemzones[indx].kz_zone[subzone] =
1253 			    uma_zcreate(name, size,
1254 #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
1255 			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
1256 #else
1257 			    NULL, NULL, NULL, NULL,
1258 #endif
1259 			    align, UMA_ZONE_MALLOC);
1260 		}
1261 		for (;i <= size; i+= KMEM_ZBASE)
1262 			kmemsize[i >> KMEM_ZSHIFT] = indx;
1263 	}
1264 }
1265 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1266 
1267 void
1268 malloc_init(void *data)
1269 {
1270 	struct malloc_type_internal *mtip;
1271 	struct malloc_type *mtp;
1272 
1273 	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
1274 
1275 	mtp = data;
1276 	if (mtp->ks_version != M_VERSION)
1277 		panic("malloc_init: type %s with unsupported version %lu",
1278 		    mtp->ks_shortdesc, mtp->ks_version);
1279 
1280 	mtip = &mtp->ks_mti;
1281 	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1282 	mtp_set_subzone(mtp);
1283 
1284 	mtx_lock(&malloc_mtx);
1285 	mtp->ks_next = kmemstatistics;
1286 	kmemstatistics = mtp;
1287 	kmemcount++;
1288 	mtx_unlock(&malloc_mtx);
1289 }
1290 
1291 void
1292 malloc_uninit(void *data)
1293 {
1294 	struct malloc_type_internal *mtip;
1295 	struct malloc_type_stats *mtsp;
1296 	struct malloc_type *mtp, *temp;
1297 	long temp_allocs, temp_bytes;
1298 	int i;
1299 
1300 	mtp = data;
1301 	KASSERT(mtp->ks_version == M_VERSION,
1302 	    ("malloc_uninit: bad malloc type version"));
1303 
1304 	mtx_lock(&malloc_mtx);
1305 	mtip = &mtp->ks_mti;
1306 	if (mtp != kmemstatistics) {
1307 		for (temp = kmemstatistics; temp != NULL;
1308 		    temp = temp->ks_next) {
1309 			if (temp->ks_next == mtp) {
1310 				temp->ks_next = mtp->ks_next;
1311 				break;
1312 			}
1313 		}
1314 		KASSERT(temp,
1315 		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
1316 	} else
1317 		kmemstatistics = mtp->ks_next;
1318 	kmemcount--;
1319 	mtx_unlock(&malloc_mtx);
1320 
1321 	/*
1322 	 * Look for memory leaks.
1323 	 */
1324 	temp_allocs = temp_bytes = 0;
1325 	for (i = 0; i <= mp_maxid; i++) {
1326 		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1327 		temp_allocs += mtsp->mts_numallocs;
1328 		temp_allocs -= mtsp->mts_numfrees;
1329 		temp_bytes += mtsp->mts_memalloced;
1330 		temp_bytes -= mtsp->mts_memfreed;
1331 	}
1332 	if (temp_allocs > 0 || temp_bytes > 0) {
1333 		printf("Warning: memory type %s leaked memory on destroy "
1334 		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
1335 		    temp_allocs, temp_bytes);
1336 	}
1337 
1338 	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1339 }
1340 
1341 struct malloc_type *
1342 malloc_desc2type(const char *desc)
1343 {
1344 	struct malloc_type *mtp;
1345 
1346 	mtx_assert(&malloc_mtx, MA_OWNED);
1347 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1348 		if (strcmp(mtp->ks_shortdesc, desc) == 0)
1349 			return (mtp);
1350 	}
1351 	return (NULL);
1352 }
1353 
1354 static int
1355 sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1356 {
1357 	struct malloc_type_stream_header mtsh;
1358 	struct malloc_type_internal *mtip;
1359 	struct malloc_type_stats *mtsp, zeromts;
1360 	struct malloc_type_header mth;
1361 	struct malloc_type *mtp;
1362 	int error, i;
1363 	struct sbuf sbuf;
1364 
1365 	error = sysctl_wire_old_buffer(req, 0);
1366 	if (error != 0)
1367 		return (error);
1368 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
1369 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1370 	mtx_lock(&malloc_mtx);
1371 
1372 	bzero(&zeromts, sizeof(zeromts));
1373 
1374 	/*
1375 	 * Insert stream header.
1376 	 */
1377 	bzero(&mtsh, sizeof(mtsh));
1378 	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1379 	mtsh.mtsh_maxcpus = MAXCPU;
1380 	mtsh.mtsh_count = kmemcount;
1381 	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1382 
1383 	/*
1384 	 * Insert alternating sequence of type headers and type statistics.
1385 	 */
1386 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1387 		mtip = &mtp->ks_mti;
1388 
1389 		/*
1390 		 * Insert type header.
1391 		 */
1392 		bzero(&mth, sizeof(mth));
1393 		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
1394 		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1395 
1396 		/*
1397 		 * Insert type statistics for each CPU.
1398 		 */
1399 		for (i = 0; i <= mp_maxid; i++) {
1400 			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1401 			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1402 		}
1403 		/*
1404 		 * Fill in the missing CPUs.
1405 		 */
1406 		for (; i < MAXCPU; i++) {
1407 			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
1408 		}
1409 	}
1410 	mtx_unlock(&malloc_mtx);
1411 	error = sbuf_finish(&sbuf);
1412 	sbuf_delete(&sbuf);
1413 	return (error);
1414 }
1415 
1416 SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
1417     CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
1418     sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1419     "Return malloc types");
1420 
1421 SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1422     "Count of kernel malloc types");
1423 
1424 void
1425 malloc_type_list(malloc_type_list_func_t *func, void *arg)
1426 {
1427 	struct malloc_type *mtp, **bufmtp;
1428 	int count, i;
1429 	size_t buflen;
1430 
1431 	mtx_lock(&malloc_mtx);
1432 restart:
1433 	mtx_assert(&malloc_mtx, MA_OWNED);
1434 	count = kmemcount;
1435 	mtx_unlock(&malloc_mtx);
1436 
1437 	buflen = sizeof(struct malloc_type *) * count;
1438 	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
1439 
1440 	mtx_lock(&malloc_mtx);
1441 
1442 	if (count < kmemcount) {
1443 		free(bufmtp, M_TEMP);
1444 		goto restart;
1445 	}
1446 
1447 	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
1448 		bufmtp[i] = mtp;
1449 
1450 	mtx_unlock(&malloc_mtx);
1451 
1452 	for (i = 0; i < count; i++)
1453 		(func)(bufmtp[i], arg);
1454 
1455 	free(bufmtp, M_TEMP);
1456 }
1457 
1458 #ifdef DDB
1459 static int64_t
1460 get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
1461     uint64_t *inuse)
1462 {
1463 	const struct malloc_type_stats *mtsp;
1464 	uint64_t frees, alloced, freed;
1465 	int i;
1466 
1467 	*allocs = 0;
1468 	frees = 0;
1469 	alloced = 0;
1470 	freed = 0;
1471 	for (i = 0; i <= mp_maxid; i++) {
1472 		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1473 
1474 		*allocs += mtsp->mts_numallocs;
1475 		frees += mtsp->mts_numfrees;
1476 		alloced += mtsp->mts_memalloced;
1477 		freed += mtsp->mts_memfreed;
1478 	}
1479 	*inuse = *allocs - frees;
1480 	return (alloced - freed);
1481 }
1482 
1483 DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
1484 {
1485 	const char *fmt_hdr, *fmt_entry;
1486 	struct malloc_type *mtp;
1487 	uint64_t allocs, inuse;
1488 	int64_t size;
1489 	/* variables for sorting */
1490 	struct malloc_type *last_mtype, *cur_mtype;
1491 	int64_t cur_size, last_size;
1492 	int ties;
1493 
1494 	if (modif[0] == 'i') {
1495 		fmt_hdr = "%s,%s,%s,%s\n";
1496 		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
1497 	} else {
1498 		fmt_hdr = "%18s %12s  %12s %12s\n";
1499 		fmt_entry = "%18s %12ju %12jdK %12ju\n";
1500 	}
1501 
1502 	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
1503 
1504 	/* Select sort, largest size first. */
1505 	last_mtype = NULL;
1506 	last_size = INT64_MAX;
1507 	for (;;) {
1508 		cur_mtype = NULL;
1509 		cur_size = -1;
1510 		ties = 0;
1511 
1512 		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1513 			/*
1514 			 * In the case of size ties, print out mtypes
1515 			 * in the order they are encountered.  That is,
1516 			 * when we encounter the most recently output
1517 			 * mtype, we have already printed all preceding
1518 			 * ties, and we must print all following ties.
1519 			 */
1520 			if (mtp == last_mtype) {
1521 				ties = 1;
1522 				continue;
1523 			}
1524 			size = get_malloc_stats(&mtp->ks_mti, &allocs,
1525 			    &inuse);
1526 			if (size > cur_size && size < last_size + ties) {
1527 				cur_size = size;
1528 				cur_mtype = mtp;
1529 			}
1530 		}
1531 		if (cur_mtype == NULL)
1532 			break;
1533 
1534 		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1535 		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
1536 		    howmany(size, 1024), allocs);
1537 
1538 		if (db_pager_quit)
1539 			break;
1540 
1541 		last_mtype = cur_mtype;
1542 		last_size = cur_size;
1543 	}
1544 }
1545 
1546 #if MALLOC_DEBUG_MAXZONES > 1
1547 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1548 {
1549 	struct malloc_type_internal *mtip;
1550 	struct malloc_type *mtp;
1551 	u_int subzone;
1552 
1553 	if (!have_addr) {
1554 		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1555 		return;
1556 	}
1557 	mtp = (void *)addr;
1558 	if (mtp->ks_version != M_VERSION) {
1559 		db_printf("Version %lx does not match expected %x\n",
1560 		    mtp->ks_version, M_VERSION);
1561 		return;
1562 	}
1563 
1564 	mtip = &mtp->ks_mti;
1565 	subzone = mtip->mti_zone;
1566 
1567 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1568 		mtip = &mtp->ks_mti;
1569 		if (mtip->mti_zone != subzone)
1570 			continue;
1571 		db_printf("%s\n", mtp->ks_shortdesc);
1572 		if (db_pager_quit)
1573 			break;
1574 	}
1575 }
1576 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1577 #endif /* DDB */
1578