1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1987, 1991, 1993
5 * The Regents of the University of California.
6 * Copyright (c) 2005-2009 Robert N. M. Watson
7 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 /*
36 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
37 * based on memory types. Back end is implemented using the UMA(9) zone
38 * allocator. A set of fixed-size buckets is used for smaller allocations,
39 * and a special UMA allocation interface is used for larger allocations.
40 * Callers declare memory types, and statistics are maintained independently
41 * for each memory type. Statistics are maintained per-CPU for performance
42 * reasons. See malloc(9) and comments in malloc.h for a detailed
43 * description.
44 */
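/*
 * Illustrative consumer usage (editor's sketch, not part of this file): a
 * subsystem declares its own memory type and passes it to malloc(9) and
 * free(9) so the per-type statistics maintained below can attribute memory
 * to it.
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "foo subsystem buffers");
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_FOO);
 */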
45
46 #include <sys/cdefs.h>
47 #include "opt_ddb.h"
48 #include "opt_vm.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/asan.h>
53 #include <sys/kdb.h>
54 #include <sys/kernel.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/msan.h>
58 #include <sys/mutex.h>
59 #include <sys/vmmeter.h>
60 #include <sys/proc.h>
61 #include <sys/queue.h>
62 #include <sys/sbuf.h>
63 #include <sys/smp.h>
64 #include <sys/sysctl.h>
65 #include <sys/time.h>
66 #include <sys/vmem.h>
67 #ifdef EPOCH_TRACE
68 #include <sys/epoch.h>
69 #endif
70
71 #include <vm/vm.h>
72 #include <vm/pmap.h>
73 #include <vm/vm_domainset.h>
74 #include <vm/vm_pageout.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_extern.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_phys.h>
81 #include <vm/vm_pagequeue.h>
82 #include <vm/uma.h>
83 #include <vm/uma_int.h>
84 #include <vm/uma_dbg.h>
85
86 #ifdef DEBUG_MEMGUARD
87 #include <vm/memguard.h>
88 #endif
89 #ifdef DEBUG_REDZONE
90 #include <vm/redzone.h>
91 #endif
92
93 #if defined(INVARIANTS) && defined(__i386__)
94 #include <machine/cpu.h>
95 #endif
96
97 #include <ddb/ddb.h>
98
99 #ifdef KDTRACE_HOOKS
100 #include <sys/dtrace_bsd.h>
101
102 bool __read_frequently dtrace_malloc_enabled;
103 dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
104 #endif
105
106 #if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
107 defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
108 #define MALLOC_DEBUG 1
109 #endif
110
111 #if defined(KASAN) || defined(DEBUG_REDZONE)
112 #define DEBUG_REDZONE_ARG_DEF , unsigned long osize
113 #define DEBUG_REDZONE_ARG , osize
114 #else
115 #define DEBUG_REDZONE_ARG_DEF
116 #define DEBUG_REDZONE_ARG
117 #endif
118
119 typedef enum {
120 SLAB_COOKIE_SLAB_PTR = 0x0,
121 SLAB_COOKIE_MALLOC_LARGE = 0x1,
122 SLAB_COOKIE_CONTIG_MALLOC = 0x2,
123 } slab_cookie_t;
124 #define SLAB_COOKIE_MASK 0x3
125 #define SLAB_COOKIE_SHIFT 2
126 #define GET_SLAB_COOKIE(_slab) \
127 ((slab_cookie_t)(uintptr_t)(_slab) & SLAB_COOKIE_MASK)
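/*
 * Summary of the cookie encoding (derived from the macros and helpers later
 * in this file): for ordinary zone allocations vtozoneslab() yields a real,
 * pointer-aligned uma_slab_t, so its low two bits are SLAB_COOKIE_SLAB_PTR.
 * malloc_large() and contigmalloc() instead store
 * (size << SLAB_COOKIE_SHIFT) | cookie in the slab slot, and
 * malloc_large_size()/contigmalloc_size() recover the size by shifting the
 * cookie bits back out.
 */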
128
129 /*
130 * When realloc() is called, if the new size is sufficiently smaller than
131 * the old size, realloc() will allocate a new, smaller block to avoid
132 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
133 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
134 */
135 #ifndef REALLOC_FRACTION
136 #define REALLOC_FRACTION 1 /* new block if <= half the size */
137 #endif
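/*
 * Example (editor's illustration): with the default REALLOC_FRACTION of 1,
 * shrinking a block backed by a 1024-byte zone to 512 bytes or less makes
 * realloc() allocate a new, smaller block and copy the data, while shrinking
 * it to anywhere in the 513..1024 byte range reuses the existing block.
 */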
138
139 /*
140 * Centrally define some common malloc types.
141 */
142 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
143 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
144 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
145
146 static struct malloc_type *kmemstatistics;
147 static int kmemcount;
148
149 #define KMEM_ZSHIFT 4
150 #define KMEM_ZBASE 16
151 #define KMEM_ZMASK (KMEM_ZBASE - 1)
152
153 #define KMEM_ZMAX 65536
154 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
155 static uint8_t kmemsize[KMEM_ZSIZE + 1];
156
157 #ifndef MALLOC_DEBUG_MAXZONES
158 #define MALLOC_DEBUG_MAXZONES 1
159 #endif
160 static int numzones = MALLOC_DEBUG_MAXZONES;
161
162 /*
163 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
164 * of various sizes.
165 *
166 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
167 *
168 * XXX: The comment here used to read "These won't be powers of two for
169 * long." It's possible that a significant amount of wasted memory could be
170 * recovered by tuning the sizes of these buckets.
171 */
172 struct {
173 int kz_size;
174 const char *kz_name;
175 uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
176 } kmemzones[] = {
177 {16, "malloc-16", },
178 {32, "malloc-32", },
179 {64, "malloc-64", },
180 {128, "malloc-128", },
181 {256, "malloc-256", },
182 {384, "malloc-384", },
183 {512, "malloc-512", },
184 {1024, "malloc-1024", },
185 {2048, "malloc-2048", },
186 {4096, "malloc-4096", },
187 {8192, "malloc-8192", },
188 {16384, "malloc-16384", },
189 {32768, "malloc-32768", },
190 {65536, "malloc-65536", },
191 {0, NULL},
192 };
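/*
 * Example of the request-to-bucket mapping (editor's illustration): a
 * malloc(9) request for 100 bytes is first rounded up to a multiple of
 * KMEM_ZBASE (112 bytes), then kmemsize[112 >> KMEM_ZSHIFT] selects the
 * "malloc-128" zone, so the caller actually receives a 128-byte item.
 */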
193
194 u_long vm_kmem_size;
195 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
196 "Size of kernel memory");
197
198 static u_long kmem_zmax = KMEM_ZMAX;
199 SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
200 "Maximum allocation size that malloc(9) would use UMA as backend");
201
202 static u_long vm_kmem_size_min;
203 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
204 "Minimum size of kernel memory");
205
206 static u_long vm_kmem_size_max;
207 SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
208 "Maximum size of kernel memory");
209
210 static u_int vm_kmem_size_scale;
211 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
212 "Scale factor for kernel memory size");
213
214 static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
215 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
216 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
217 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
218
219 static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
220 SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
221 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
222 sysctl_kmem_map_free, "LU", "Free space in kmem");
223
224 static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
225 "Malloc information");
226
227 static u_int vm_malloc_zone_count = nitems(kmemzones);
228 SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
229 CTLFLAG_RD, &vm_malloc_zone_count, 0,
230 "Number of malloc zones");
231
232 static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
233 SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
234 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
235 sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");
236
237 /*
238 * The malloc_mtx protects the kmemstatistics linked list.
239 */
240 struct mtx malloc_mtx;
241
242 static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
243
244 #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
245 static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
246 "Kernel malloc debugging options");
247 #endif
248
249 /*
250 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
251 * the caller specifies M_NOWAIT. If set to 0, no failures are caused.
252 */
253 #ifdef MALLOC_MAKE_FAILURES
254 static int malloc_failure_rate;
255 static int malloc_nowait_count;
256 static int malloc_failure_count;
257 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
258 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
259 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
260 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
261 #endif
262
263 static int
264 sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
265 {
266 u_long size;
267
268 size = uma_size();
269 return (sysctl_handle_long(oidp, &size, 0, req));
270 }
271
272 static int
273 sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
274 {
275 u_long size, limit;
276
277 /* The sysctl is unsigned, implement as a saturation value. */
278 size = uma_size();
279 limit = uma_limit();
280 if (size > limit)
281 size = 0;
282 else
283 size = limit - size;
284 return (sysctl_handle_long(oidp, &size, 0, req));
285 }
286
287 static int
288 sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
289 {
290 int sizes[nitems(kmemzones)];
291 int i;
292
293 for (i = 0; i < nitems(kmemzones); i++) {
294 sizes[i] = kmemzones[i].kz_size;
295 }
296
297 return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
298 }
299
300 /*
301 * malloc(9) uma zone separation -- sub-page buffer overruns in one
302 * malloc type will affect only a subset of other malloc types.
303 */
304 #if MALLOC_DEBUG_MAXZONES > 1
305 static void
306 tunable_set_numzones(void)
307 {
308
309 TUNABLE_INT_FETCH("debug.malloc.numzones",
310 &numzones);
311
312 /* Sanity check the number of malloc uma zones. */
313 if (numzones <= 0)
314 numzones = 1;
315 if (numzones > MALLOC_DEBUG_MAXZONES)
316 numzones = MALLOC_DEBUG_MAXZONES;
317 }
318 SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
319 SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
320 &numzones, 0, "Number of malloc uma subzones");
321
322 /*
323 * Any number that changes regularly is an okay choice for the
324 * offset. Build numbers are pretty good if you have them.
325 */
326 static u_int zone_offset = __FreeBSD_version;
327 TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
328 SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
329 &zone_offset, 0, "Separate malloc types by examining the "
330 "Nth character in the malloc type short description.");
331
332 static void
333 mtp_set_subzone(struct malloc_type *mtp)
334 {
335 struct malloc_type_internal *mtip;
336 const char *desc;
337 size_t len;
338 u_int val;
339
340 mtip = &mtp->ks_mti;
341 desc = mtp->ks_shortdesc;
342 if (desc == NULL || (len = strlen(desc)) == 0)
343 val = 0;
344 else
345 val = desc[zone_offset % len];
346 mtip->mti_zone = (val % numzones);
347 }
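/*
 * Example (editor's illustration): with MALLOC_DEBUG_MAXZONES set to 8 and a
 * type whose short description is "devbuf", zone_offset % strlen("devbuf")
 * picks one character of the name; if that character is 'e' (ASCII 101),
 * the type is assigned subzone 101 % 8 == 5 for all of its allocations.
 */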
348
349 static inline u_int
350 mtp_get_subzone(struct malloc_type *mtp)
351 {
352 struct malloc_type_internal *mtip;
353
354 mtip = &mtp->ks_mti;
355
356 KASSERT(mtip->mti_zone < numzones,
357 ("mti_zone %u out of range %d",
358 mtip->mti_zone, numzones));
359 return (mtip->mti_zone);
360 }
361 #elif MALLOC_DEBUG_MAXZONES == 0
362 #error "MALLOC_DEBUG_MAXZONES must be positive."
363 #else
364 static void
365 mtp_set_subzone(struct malloc_type *mtp)
366 {
367 struct malloc_type_internal *mtip;
368
369 mtip = &mtp->ks_mti;
370 mtip->mti_zone = 0;
371 }
372
373 static inline u_int
374 mtp_get_subzone(struct malloc_type *mtp)
375 {
376
377 return (0);
378 }
379 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
380
381 /*
382 * An allocation has succeeded -- update malloc type statistics for the
383 * amount of the bucket size. Occurs within a critical section so that the
384 * thread isn't preempted and doesn't migrate while updating per-CPU
385 * statistics.
386 */
387 static void
388 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
389 int zindx)
390 {
391 struct malloc_type_internal *mtip;
392 struct malloc_type_stats *mtsp;
393
394 critical_enter();
395 mtip = &mtp->ks_mti;
396 mtsp = zpcpu_get(mtip->mti_stats);
397 if (size > 0) {
398 mtsp->mts_memalloced += size;
399 mtsp->mts_numallocs++;
400 }
401 if (zindx != -1)
402 mtsp->mts_size |= 1 << zindx;
403
404 #ifdef KDTRACE_HOOKS
405 if (__predict_false(dtrace_malloc_enabled)) {
406 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
407 if (probe_id != 0)
408 (dtrace_malloc_probe)(probe_id,
409 (uintptr_t) mtp, (uintptr_t) mtip,
410 (uintptr_t) mtsp, size, zindx);
411 }
412 #endif
413
414 critical_exit();
415 }
416
417 void
418 malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
419 {
420
421 if (size > 0)
422 malloc_type_zone_allocated(mtp, size, -1);
423 }
424
425 /*
426 * A free operation has occurred -- update malloc type statistics for the
427 * amount of the bucket size. Occurs within a critical section so that the
428 * thread isn't preempted and doesn't migrate while updating per-CPU
429 * statistics.
430 */
431 void
432 malloc_type_freed(struct malloc_type *mtp, unsigned long size)
433 {
434 struct malloc_type_internal *mtip;
435 struct malloc_type_stats *mtsp;
436
437 critical_enter();
438 mtip = &mtp->ks_mti;
439 mtsp = zpcpu_get(mtip->mti_stats);
440 mtsp->mts_memfreed += size;
441 mtsp->mts_numfrees++;
442
443 #ifdef KDTRACE_HOOKS
444 if (__predict_false(dtrace_malloc_enabled)) {
445 uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
446 if (probe_id != 0)
447 (dtrace_malloc_probe)(probe_id,
448 (uintptr_t) mtp, (uintptr_t) mtip,
449 (uintptr_t) mtsp, size, 0);
450 }
451 #endif
452
453 critical_exit();
454 }
455
456 /*
457 * contigmalloc:
458 *
459 * Allocate a block of physically contiguous memory.
460 *
461 * If M_NOWAIT is set, this routine will not block and will return NULL
462 * if the allocation fails.
463 */
464 #define IS_CONTIG_MALLOC(_slab) \
465 (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_CONTIG_MALLOC)
466 #define CONTIG_MALLOC_SLAB(_size) \
467 ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC))
468 static inline size_t
469 contigmalloc_size(uma_slab_t slab)
470 {
471 uintptr_t va;
472
473 KASSERT(IS_CONTIG_MALLOC(slab),
474 ("%s: called on non-contigmalloc allocation: %p", __func__, slab));
475 va = (uintptr_t)slab;
476 return (va >> SLAB_COOKIE_SHIFT);
477 }
478
479 void *
480 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
481 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
482 vm_paddr_t boundary)
483 {
484 void *ret;
485
486 ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
487 boundary, VM_MEMATTR_DEFAULT);
488 if (ret != NULL) {
489 /* Use low bits unused for slab pointers. */
490 vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
491 malloc_type_allocated(type, round_page(size));
492 }
493 return (ret);
494 }
495
496 void *
497 contigmalloc_domainset(unsigned long size, struct malloc_type *type,
498 struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
499 unsigned long alignment, vm_paddr_t boundary)
500 {
501 void *ret;
502
503 ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
504 alignment, boundary, VM_MEMATTR_DEFAULT);
505 if (ret != NULL) {
506 /* Use low bits unused for slab pointers. */
507 vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
508 malloc_type_allocated(type, round_page(size));
509 }
510 return (ret);
511 }
512 #undef IS_CONTIG_MALLOC
513 #undef CONTIG_MALLOC_SLAB
514
515 /* contigfree(9) is deprecated. */
516 void
517 contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
518 {
519 free(addr, type);
520 }
521
522 #ifdef MALLOC_DEBUG
523 static int
524 malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
525 int flags)
526 {
527 KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
528 KASSERT((flags & (M_WAITOK | M_NOWAIT)) != 0,
529 ("malloc: flags must include either M_WAITOK or M_NOWAIT"));
530 KASSERT((flags & (M_WAITOK | M_NOWAIT)) != (M_WAITOK | M_NOWAIT),
531 ("malloc: flags may not include both M_WAITOK and M_NOWAIT"));
532 KASSERT((flags & M_NEVERFREED) == 0,
533 ("malloc: M_NEVERFREED is for internal use only"));
534 #ifdef MALLOC_MAKE_FAILURES
535 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
536 atomic_add_int(&malloc_nowait_count, 1);
537 if ((malloc_nowait_count % malloc_failure_rate) == 0) {
538 atomic_add_int(&malloc_failure_count, 1);
539 *vap = NULL;
540 return (EJUSTRETURN);
541 }
542 }
543 #endif
544 if (flags & M_WAITOK) {
545 KASSERT(curthread->td_intr_nesting_level == 0,
546 ("malloc(M_WAITOK) in interrupt context"));
547 if (__predict_false(!THREAD_CAN_SLEEP())) {
548 #ifdef EPOCH_TRACE
549 epoch_trace_list(curthread);
550 #endif
551 KASSERT(0,
552 ("malloc(M_WAITOK) with sleeping prohibited"));
553 }
554 }
555 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
556 ("malloc: called with spinlock or critical section held"));
557
558 #ifdef DEBUG_MEMGUARD
559 if (memguard_cmp_mtp(mtp, *sizep)) {
560 *vap = memguard_alloc(*sizep, flags);
561 if (*vap != NULL)
562 return (EJUSTRETURN);
563 /* This is unfortunate but should not be fatal. */
564 }
565 #endif
566
567 #ifdef DEBUG_REDZONE
568 *sizep = redzone_size_ntor(*sizep);
569 #endif
570
571 return (0);
572 }
573 #endif
574
575 /*
576 * Handle large allocations and frees by using kmem_malloc directly.
577 */
578 #define IS_MALLOC_LARGE(_slab) \
579 (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_MALLOC_LARGE)
580 #define MALLOC_LARGE_SLAB(_size) \
581 ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_MALLOC_LARGE))
582 static inline size_t
583 malloc_large_size(uma_slab_t slab)
584 {
585 uintptr_t va;
586
587 va = (uintptr_t)slab;
588 KASSERT(IS_MALLOC_LARGE(slab),
589 ("%s: called on non-malloc_large allocation: %p", __func__, slab));
590 return (va >> SLAB_COOKIE_SHIFT);
591 }
592
593 static caddr_t __noinline
594 malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
595 int flags DEBUG_REDZONE_ARG_DEF)
596 {
597 void *va;
598
599 size = roundup(size, PAGE_SIZE);
600 va = kmem_malloc_domainset(policy, size, flags);
601 if (va != NULL) {
602 /* Use low bits unused for slab pointers. */
603 vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
604 uma_total_inc(size);
605 }
606 malloc_type_allocated(mtp, va == NULL ? 0 : size);
607 if (__predict_false(va == NULL)) {
608 KASSERT((flags & M_WAITOK) == 0,
609 ("malloc(M_WAITOK) returned NULL"));
610 } else {
611 #ifdef DEBUG_REDZONE
612 va = redzone_setup(va, osize);
613 #endif
614 kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
615 }
616 return (va);
617 }
618
619 static void
620 free_large(void *addr, size_t size)
621 {
622
623 kmem_free(addr, size);
624 uma_total_dec(size);
625 }
626 #undef IS_MALLOC_LARGE
627 #undef MALLOC_LARGE_SLAB
628
629 /*
630 * malloc:
631 *
632 * Allocate a block of memory.
633 *
634 * If M_NOWAIT is set, this routine will not block and will return NULL
635 * if the allocation fails.
636 */
637 void *
638 (malloc)(size_t size, struct malloc_type *mtp, int flags)
639 {
640 int indx;
641 caddr_t va;
642 uma_zone_t zone;
643 #if defined(DEBUG_REDZONE) || defined(KASAN)
644 unsigned long osize = size;
645 #endif
646
647 MPASS((flags & M_EXEC) == 0);
648
649 #ifdef MALLOC_DEBUG
650 va = NULL;
651 if (malloc_dbg(&va, &size, mtp, flags) != 0)
652 return (va);
653 #endif
654
655 if (__predict_false(size > kmem_zmax))
656 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
657 DEBUG_REDZONE_ARG));
658
659 if (size & KMEM_ZMASK)
660 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
661 indx = kmemsize[size >> KMEM_ZSHIFT];
662 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
663 va = uma_zalloc_arg(zone, zone, flags);
664 if (va != NULL) {
665 size = zone->uz_size;
666 if ((flags & M_ZERO) == 0) {
667 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
668 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
669 }
670 }
671 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
672 if (__predict_false(va == NULL)) {
673 KASSERT((flags & M_WAITOK) == 0,
674 ("malloc(M_WAITOK) returned NULL"));
675 }
676 #ifdef DEBUG_REDZONE
677 if (va != NULL)
678 va = redzone_setup(va, osize);
679 #endif
680 #ifdef KASAN
681 if (va != NULL)
682 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
683 #endif
684 return ((void *) va);
685 }
686
687 static void *
688 malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
689 int flags)
690 {
691 uma_zone_t zone;
692 caddr_t va;
693 size_t size;
694 int indx;
695
696 size = *sizep;
697 KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
698 ("malloc_domain: Called with bad flag / size combination"));
699 if (size & KMEM_ZMASK)
700 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
701 indx = kmemsize[size >> KMEM_ZSHIFT];
702 zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
703 va = uma_zalloc_domain(zone, zone, domain, flags);
704 if (va != NULL)
705 *sizep = zone->uz_size;
706 *indxp = indx;
707 return ((void *)va);
708 }
709
710 void *
711 malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
712 int flags)
713 {
714 struct vm_domainset_iter di;
715 caddr_t va;
716 int domain;
717 int indx;
718 #if defined(KASAN) || defined(DEBUG_REDZONE)
719 unsigned long osize = size;
720 #endif
721
722 MPASS((flags & M_EXEC) == 0);
723
724 #ifdef MALLOC_DEBUG
725 va = NULL;
726 if (malloc_dbg(&va, &size, mtp, flags) != 0)
727 return (va);
728 #endif
729
730 if (__predict_false(size > kmem_zmax))
731 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
732 DEBUG_REDZONE_ARG));
733
734 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
735 do {
736 va = malloc_domain(&size, &indx, mtp, domain, flags);
737 } while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
738 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
739 if (__predict_false(va == NULL)) {
740 KASSERT((flags & M_WAITOK) == 0,
741 ("malloc(M_WAITOK) returned NULL"));
742 }
743 #ifdef DEBUG_REDZONE
744 if (va != NULL)
745 va = redzone_setup(va, osize);
746 #endif
747 #ifdef KASAN
748 if (va != NULL)
749 kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
750 #endif
751 #ifdef KMSAN
752 if ((flags & M_ZERO) == 0) {
753 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
754 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
755 }
756 #endif
757 return (va);
758 }
759
760 /*
761 * Allocate an executable area.
762 */
763 void *
764 malloc_exec(size_t size, struct malloc_type *mtp, int flags)
765 {
766
767 return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
768 }
769
770 void *
771 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
772 int flags)
773 {
774 #if defined(DEBUG_REDZONE) || defined(KASAN)
775 unsigned long osize = size;
776 #endif
777 #ifdef MALLOC_DEBUG
778 caddr_t va;
779 #endif
780
781 flags |= M_EXEC;
782
783 #ifdef MALLOC_DEBUG
784 va = NULL;
785 if (malloc_dbg(&va, &size, mtp, flags) != 0)
786 return (va);
787 #endif
788
789 return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
790 }
791
792 void *
793 malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
794 {
795 return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
796 flags));
797 }
798
799 void *
800 malloc_domainset_aligned(size_t size, size_t align,
801 struct malloc_type *mtp, struct domainset *ds, int flags)
802 {
803 void *res;
804 size_t asize;
805
806 KASSERT(powerof2(align),
807 ("malloc_domainset_aligned: wrong align %#zx size %#zx",
808 align, size));
809 KASSERT(align <= PAGE_SIZE,
810 ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
811 align, size));
812
813 /*
814 * Round the allocation size up to the next power of 2,
815 * because we can only guarantee alignment for
816 * power-of-2-sized allocations. Further increase the
817 * allocation size to align if the rounded size is less than
818 * align, since malloc zones provide alignment equal to their
819 * size.
820 */
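/*
 * Worked example (editor's illustration): a request for 3000 bytes with
 * 64-byte alignment has size > align, so asize becomes
 * 1UL << flsl(2999) == 4096 and the allocation is satisfied from the
 * page-aligned malloc-4096 zone; a 48-byte request with the same alignment
 * is simply rounded up to 64 bytes.
 */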
821 if (size == 0)
822 size = 1;
823 asize = size <= align ? align : 1UL << flsl(size - 1);
824
825 res = malloc_domainset(asize, mtp, ds, flags);
826 KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
827 ("malloc_domainset_aligned: result not aligned %p size %#zx "
828 "allocsize %#zx align %#zx", res, size, asize, align));
829 return (res);
830 }
831
832 void *
833 mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
834 {
835
836 if (WOULD_OVERFLOW(nmemb, size))
837 panic("mallocarray: %zu * %zu overflowed", nmemb, size);
838
839 return (malloc(size * nmemb, type, flags));
840 }
841
842 void *
843 mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
844 struct domainset *ds, int flags)
845 {
846
847 if (WOULD_OVERFLOW(nmemb, size))
848 panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);
849
850 return (malloc_domainset(size * nmemb, type, ds, flags));
851 }
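/*
 * Example (editor's illustration): on a 64-bit machine,
 * mallocarray(1UL << 32, 1UL << 32, M_TEMP, M_WAITOK) panics because the
 * product wraps around SIZE_MAX, instead of silently allocating a block far
 * smaller than the caller expects.
 */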
852
853 #if defined(INVARIANTS) && !defined(KASAN)
854 static void
855 free_save_type(void *addr, struct malloc_type *mtp, u_long size)
856 {
857 struct malloc_type **mtpp = addr;
858
859 /*
860 * Cache a pointer to the malloc_type that most recently freed
861 * this memory here. This way we know who is most likely to
862 * have stepped on it later.
863 *
864 * This code assumes that size is a multiple of 8 bytes on
865 * 64-bit machines.
866 */
867 mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
868 mtpp += (size - sizeof(struct malloc_type *)) /
869 sizeof(struct malloc_type *);
870 *mtpp = mtp;
871 }
872 #endif
873
874 #ifdef MALLOC_DEBUG
875 static int
876 free_dbg(void **addrp, struct malloc_type *mtp)
877 {
878 void *addr;
879
880 addr = *addrp;
881 KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
882 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
883 ("free: called with spinlock or critical section held"));
884
885 /* free(NULL, ...) does nothing */
886 if (addr == NULL)
887 return (EJUSTRETURN);
888
889 #ifdef DEBUG_MEMGUARD
890 if (is_memguard_addr(addr)) {
891 memguard_free(addr);
892 return (EJUSTRETURN);
893 }
894 #endif
895
896 #ifdef DEBUG_REDZONE
897 redzone_check(addr);
898 *addrp = redzone_addr_ntor(addr);
899 #endif
900
901 return (0);
902 }
903 #endif
904
905 static __always_inline void
906 _free(void *addr, struct malloc_type *mtp, bool dozero)
907 {
908 uma_zone_t zone;
909 uma_slab_t slab;
910 u_long size;
911
912 #ifdef MALLOC_DEBUG
913 if (free_dbg(&addr, mtp) != 0)
914 return;
915 #endif
916 /* free(NULL, ...) does nothing */
917 if (addr == NULL)
918 return;
919
920 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
921 if (slab == NULL)
922 panic("%s(%d): address %p(%p) has not been allocated", __func__,
923 dozero, addr, (void *)((uintptr_t)addr & (~UMA_SLAB_MASK)));
924
925 switch (GET_SLAB_COOKIE(slab)) {
926 case __predict_true(SLAB_COOKIE_SLAB_PTR):
927 size = zone->uz_size;
928 #if defined(INVARIANTS) && !defined(KASAN)
929 free_save_type(addr, mtp, size);
930 #endif
931 if (dozero) {
932 kasan_mark(addr, size, size, 0);
933 explicit_bzero(addr, size);
934 }
935 uma_zfree_arg(zone, addr, slab);
936 break;
937 case SLAB_COOKIE_MALLOC_LARGE:
938 size = malloc_large_size(slab);
939 if (dozero) {
940 kasan_mark(addr, size, size, 0);
941 explicit_bzero(addr, size);
942 }
943 free_large(addr, size);
944 break;
945 case SLAB_COOKIE_CONTIG_MALLOC:
946 size = round_page(contigmalloc_size(slab));
947 if (dozero)
948 explicit_bzero(addr, size);
949 kmem_free(addr, size);
950 break;
951 default:
952 panic("%s(%d): addr %p slab %p with unknown cookie %d",
953 __func__, dozero, addr, slab, GET_SLAB_COOKIE(slab));
954 /* NOTREACHED */
955 }
956 malloc_type_freed(mtp, size);
957 }
958
959 /*
960 * free:
961 * Free a block of memory allocated by malloc/contigmalloc.
962 * This routine may not block.
963 */
964 void
965 free(void *addr, struct malloc_type *mtp)
966 {
967 _free(addr, mtp, false);
968 }
969
970 /*
971 * zfree:
972 * Zero then free a block of memory allocated by malloc/contigmalloc.
973 * This routine may not block.
974 */
975 void
976 zfree(void *addr, struct malloc_type *mtp)
977 {
978 _free(addr, mtp, true);
979 }
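/*
 * zfree() is typically used for buffers that held sensitive data (for
 * example cryptographic keys), so that the contents are scrubbed before the
 * memory is recycled.
 */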
980
981 /*
982 * realloc: change the size of a memory block
983 */
984 void *
985 realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
986 {
987 #ifndef DEBUG_REDZONE
988 uma_zone_t zone;
989 uma_slab_t slab;
990 #endif
991 unsigned long alloc;
992 void *newaddr;
993
994 KASSERT(mtp->ks_version == M_VERSION,
995 ("realloc: bad malloc type version"));
996 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
997 ("realloc: called with spinlock or critical section held"));
998
999 /* realloc(NULL, ...) is equivalent to malloc(...) */
1000 if (addr == NULL)
1001 return (malloc(size, mtp, flags));
1002
1003 /*
1004 * XXX: Should report free of old memory and alloc of new memory to
1005 * per-CPU stats.
1006 */
1007
1008 #ifdef DEBUG_MEMGUARD
1009 if (is_memguard_addr(addr))
1010 return (memguard_realloc(addr, size, mtp, flags));
1011 #endif
1012
1013 #ifdef DEBUG_REDZONE
1014 alloc = redzone_get_size(addr);
1015 #else
1016 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1017
1018 /* Sanity check */
1019 KASSERT(slab != NULL,
1020 ("realloc: address %p out of range", (void *)addr));
1021
1022 /* Get the size of the original block */
1023 switch (GET_SLAB_COOKIE(slab)) {
1024 case __predict_true(SLAB_COOKIE_SLAB_PTR):
1025 alloc = zone->uz_size;
1026 break;
1027 case SLAB_COOKIE_MALLOC_LARGE:
1028 alloc = malloc_large_size(slab);
1029 break;
1030 default:
1031 #ifdef INVARIANTS
1032 panic("%s: called for addr %p of unsupported allocation type; "
1033 "slab %p cookie %d", __func__, addr, slab, GET_SLAB_COOKIE(slab));
1034 #endif
1035 return (NULL);
1036 }
1037
1038 /* Reuse the original block if appropriate */
1039 if (size <= alloc &&
1040 (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
1041 kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
1042 return (addr);
1043 }
1044 #endif /* !DEBUG_REDZONE */
1045
1046 /* Allocate a new, bigger (or smaller) block */
1047 if ((newaddr = malloc(size, mtp, flags)) == NULL)
1048 return (NULL);
1049
1050 /*
1051 * Copy over original contents. For KASAN, the redzone must be marked
1052 * valid before performing the copy.
1053 */
1054 kasan_mark(addr, alloc, alloc, 0);
1055 bcopy(addr, newaddr, min(size, alloc));
1056 free(addr, mtp);
1057 return (newaddr);
1058 }
1059
1060 /*
1061 * reallocf: same as realloc() but free memory on failure.
1062 */
1063 void *
1064 reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
1065 {
1066 void *mem;
1067
1068 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
1069 free(addr, mtp);
1070 return (mem);
1071 }
1072
1073 /*
1074 * malloc_size: returns the number of bytes allocated for a request of the
1075 * specified size
1076 */
1077 size_t
1078 malloc_size(size_t size)
1079 {
1080 int indx;
1081
1082 if (size > kmem_zmax)
1083 return (round_page(size));
1084 if (size & KMEM_ZMASK)
1085 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
1086 indx = kmemsize[size >> KMEM_ZSHIFT];
1087 return (kmemzones[indx].kz_size);
1088 }
1089
1090 /*
1091 * malloc_usable_size: returns the usable size of the allocation.
1092 */
1093 size_t
1094 malloc_usable_size(const void *addr)
1095 {
1096 #ifndef DEBUG_REDZONE
1097 uma_zone_t zone;
1098 uma_slab_t slab;
1099 #endif
1100 u_long size;
1101
1102 if (addr == NULL)
1103 return (0);
1104
1105 #ifdef DEBUG_MEMGUARD
1106 if (is_memguard_addr(__DECONST(void *, addr)))
1107 return (memguard_get_req_size(addr));
1108 #endif
1109
1110 #ifdef DEBUG_REDZONE
1111 size = redzone_get_size(__DECONST(void *, addr));
1112 #else
1113 vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
1114 if (slab == NULL)
1115 panic("malloc_usable_size: address %p(%p) is not allocated",
1116 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
1117
1118 switch (GET_SLAB_COOKIE(slab)) {
1119 case __predict_true(SLAB_COOKIE_SLAB_PTR):
1120 size = zone->uz_size;
1121 break;
1122 case SLAB_COOKIE_MALLOC_LARGE:
1123 size = malloc_large_size(slab);
1124 break;
1125 default:
1126 __assert_unreachable();
1127 size = 0;
1128 break;
1129 }
1130 #endif
1131
1132 /*
1133 * Unmark the redzone to avoid reports from consumers who are
1134 * (presumably) about to use the full allocation size.
1135 */
1136 kasan_mark(addr, size, size, 0);
1137
1138 return (size);
1139 }
1140
1141 CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1142
1143 /*
1144 * Initialize the kernel memory (kmem) arena.
1145 */
1146 void
1147 kmeminit(void)
1148 {
1149 u_long mem_size;
1150 u_long tmp;
1151
1152 #ifdef VM_KMEM_SIZE
1153 if (vm_kmem_size == 0)
1154 vm_kmem_size = VM_KMEM_SIZE;
1155 #endif
1156 #ifdef VM_KMEM_SIZE_MIN
1157 if (vm_kmem_size_min == 0)
1158 vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1159 #endif
1160 #ifdef VM_KMEM_SIZE_MAX
1161 if (vm_kmem_size_max == 0)
1162 vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1163 #endif
1164 /*
1165 * Calculate the amount of kernel virtual address (KVA) space that is
1166 * preallocated to the kmem arena. In order to support a wide range
1167 * of machines, it is a function of the physical memory size,
1168 * specifically,
1169 *
1170 * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1171 * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1172 *
1173 * Every architecture must define an integral value for
1174 * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
1175 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1176 * ceiling on this preallocation, are optional. Typically,
1177 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1178 * a given architecture.
1179 */
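/*
 * Worked example (editor's illustration, hypothetical values): with 4 KB
 * pages, 8 GB of physical memory (2,097,152 pages), a scale of 3 and no
 * vm.kmem_size tunable, the preallocation is 2,097,152 / 3 * PAGE_SIZE,
 * roughly 2.7 GB, which is then clamped to the VM_KMEM_SIZE_MIN and
 * VM_KMEM_SIZE_MAX bounds if those are defined.
 */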
1180 mem_size = vm_cnt.v_page_count;
1181 if (mem_size <= 32768) /* delphij XXX 128MB */
1182 kmem_zmax = PAGE_SIZE;
1183
1184 if (vm_kmem_size_scale < 1)
1185 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1186
1187 /*
1188 * Check if we should use defaults for the "vm_kmem_size"
1189 * variable:
1190 */
1191 if (vm_kmem_size == 0) {
1192 vm_kmem_size = mem_size / vm_kmem_size_scale;
1193 vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
1194 vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1195 if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
1196 vm_kmem_size = vm_kmem_size_min;
1197 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1198 vm_kmem_size = vm_kmem_size_max;
1199 }
1200 if (vm_kmem_size == 0)
1201 panic("Tune VM_KMEM_SIZE_* for the platform");
1202
1203 /*
1204 * The amount of KVA space that is preallocated to the
1205 * kmem arena can be set statically at compile-time or manually
1206 * through the kernel environment. However, it is still limited to
1207 * twice the physical memory size, which has been sufficient to handle
1208 * the most severe cases of external fragmentation in the kmem arena.
1209 */
1210 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1211 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
1212
1213 vm_kmem_size = round_page(vm_kmem_size);
1214
1215 /*
1216 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
1217 * shadowed. Account for this when setting the UMA limit.
1218 */
1219 #if defined(KASAN)
1220 vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
1221 (KASAN_SHADOW_SCALE + 1);
1222 #elif defined(KMSAN)
1223 vm_kmem_size /= 3;
1224 #endif
1225
1226 #ifdef DEBUG_MEMGUARD
1227 tmp = memguard_fudge(vm_kmem_size, kernel_map);
1228 #else
1229 tmp = vm_kmem_size;
1230 #endif
1231 uma_set_limit(tmp);
1232
1233 #ifdef DEBUG_MEMGUARD
1234 /*
1235 * Initialize MemGuard if support compiled in. MemGuard is a
1236 * replacement allocator used for detecting tamper-after-free
1237 * scenarios as they occur. It is only used for debugging.
1238 */
1239 memguard_init(kernel_arena);
1240 #endif
1241 }
1242
1243 /*
1244 * Initialize the kernel memory allocator
1245 */
1246 /* ARGSUSED*/
1247 static void
1248 mallocinit(void *dummy)
1249 {
1250 int i;
1251 uint8_t indx;
1252
1253 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
1254
1255 kmeminit();
1256
1257 if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
1258 kmem_zmax = KMEM_ZMAX;
1259
1260 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
1261 int size = kmemzones[indx].kz_size;
1262 const char *name = kmemzones[indx].kz_name;
1263 size_t align;
1264 int subzone;
1265
1266 align = UMA_ALIGN_PTR;
1267 if (powerof2(size) && size > sizeof(void *))
1268 align = MIN(size, PAGE_SIZE) - 1;
1269 for (subzone = 0; subzone < numzones; subzone++) {
1270 kmemzones[indx].kz_zone[subzone] =
1271 uma_zcreate(name, size,
1272 #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
1273 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
1274 #else
1275 NULL, NULL, NULL, NULL,
1276 #endif
1277 align, UMA_ZONE_MALLOC);
1278 }
1279 for (;i <= size; i+= KMEM_ZBASE)
1280 kmemsize[i >> KMEM_ZSHIFT] = indx;
1281 }
1282 }
1283 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1284
1285 void
1286 malloc_init(void *data)
1287 {
1288 struct malloc_type_internal *mtip;
1289 struct malloc_type *mtp;
1290
1291 KASSERT(vm_cnt.v_page_count != 0,
1292 ("malloc_init() called before vm_mem_init()"));
1293
1294 mtp = data;
1295 if (mtp->ks_version != M_VERSION)
1296 panic("malloc_init: type %s with unsupported version %lu",
1297 mtp->ks_shortdesc, mtp->ks_version);
1298
1299 mtip = &mtp->ks_mti;
1300 mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1301 mtp_set_subzone(mtp);
1302
1303 mtx_lock(&malloc_mtx);
1304 mtp->ks_next = kmemstatistics;
1305 kmemstatistics = mtp;
1306 kmemcount++;
1307 mtx_unlock(&malloc_mtx);
1308 }
1309
1310 void
1311 malloc_uninit(void *data)
1312 {
1313 struct malloc_type_internal *mtip;
1314 struct malloc_type_stats *mtsp;
1315 struct malloc_type *mtp, *temp;
1316 long temp_allocs, temp_bytes;
1317 int i;
1318
1319 mtp = data;
1320 KASSERT(mtp->ks_version == M_VERSION,
1321 ("malloc_uninit: bad malloc type version"));
1322
1323 mtx_lock(&malloc_mtx);
1324 mtip = &mtp->ks_mti;
1325 if (mtp != kmemstatistics) {
1326 for (temp = kmemstatistics; temp != NULL;
1327 temp = temp->ks_next) {
1328 if (temp->ks_next == mtp) {
1329 temp->ks_next = mtp->ks_next;
1330 break;
1331 }
1332 }
1333 KASSERT(temp,
1334 ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
1335 } else
1336 kmemstatistics = mtp->ks_next;
1337 kmemcount--;
1338 mtx_unlock(&malloc_mtx);
1339
1340 /*
1341 * Look for memory leaks.
1342 */
1343 temp_allocs = temp_bytes = 0;
1344 for (i = 0; i <= mp_maxid; i++) {
1345 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1346 temp_allocs += mtsp->mts_numallocs;
1347 temp_allocs -= mtsp->mts_numfrees;
1348 temp_bytes += mtsp->mts_memalloced;
1349 temp_bytes -= mtsp->mts_memfreed;
1350 }
1351 if (temp_allocs > 0 || temp_bytes > 0) {
1352 printf("Warning: memory type %s leaked memory on destroy "
1353 "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
1354 temp_allocs, temp_bytes);
1355 }
1356
1357 uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1358 }
1359
1360 struct malloc_type *
1361 malloc_desc2type(const char *desc)
1362 {
1363 struct malloc_type *mtp;
1364
1365 mtx_assert(&malloc_mtx, MA_OWNED);
1366 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1367 if (strcmp(mtp->ks_shortdesc, desc) == 0)
1368 return (mtp);
1369 }
1370 return (NULL);
1371 }
1372
1373 static int
1374 sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1375 {
1376 struct malloc_type_stream_header mtsh;
1377 struct malloc_type_internal *mtip;
1378 struct malloc_type_stats *mtsp, zeromts;
1379 struct malloc_type_header mth;
1380 struct malloc_type *mtp;
1381 int error, i;
1382 struct sbuf sbuf;
1383
1384 error = sysctl_wire_old_buffer(req, 0);
1385 if (error != 0)
1386 return (error);
1387 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
1388 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1389 mtx_lock(&malloc_mtx);
1390
1391 bzero(&zeromts, sizeof(zeromts));
1392
1393 /*
1394 * Insert stream header.
1395 */
1396 bzero(&mtsh, sizeof(mtsh));
1397 mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1398 mtsh.mtsh_maxcpus = MAXCPU;
1399 mtsh.mtsh_count = kmemcount;
1400 (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1401
1402 /*
1403 * Insert alternating sequence of type headers and type statistics.
1404 */
1405 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1406 mtip = &mtp->ks_mti;
1407
1408 /*
1409 * Insert type header.
1410 */
1411 bzero(&mth, sizeof(mth));
1412 strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
1413 (void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1414
1415 /*
1416 * Insert type statistics for each CPU.
1417 */
1418 for (i = 0; i <= mp_maxid; i++) {
1419 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1420 (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1421 }
1422 /*
1423 * Fill in the missing CPUs.
1424 */
1425 for (; i < MAXCPU; i++) {
1426 (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
1427 }
1428 }
1429 mtx_unlock(&malloc_mtx);
1430 error = sbuf_finish(&sbuf);
1431 sbuf_delete(&sbuf);
1432 return (error);
1433 }
1434
1435 SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
1436 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
1437 sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1438 "Return malloc types");
1439
1440 SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1441 "Count of kernel malloc types");
1442
1443 void
1444 malloc_type_list(malloc_type_list_func_t *func, void *arg)
1445 {
1446 struct malloc_type *mtp, **bufmtp;
1447 int count, i;
1448 size_t buflen;
1449
1450 mtx_lock(&malloc_mtx);
1451 restart:
1452 mtx_assert(&malloc_mtx, MA_OWNED);
1453 count = kmemcount;
1454 mtx_unlock(&malloc_mtx);
1455
1456 buflen = sizeof(struct malloc_type *) * count;
1457 bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
1458
1459 mtx_lock(&malloc_mtx);
1460
1461 if (count < kmemcount) {
1462 free(bufmtp, M_TEMP);
1463 goto restart;
1464 }
1465
1466 for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
1467 bufmtp[i] = mtp;
1468
1469 mtx_unlock(&malloc_mtx);
1470
1471 for (i = 0; i < count; i++)
1472 (func)(bufmtp[i], arg);
1473
1474 free(bufmtp, M_TEMP);
1475 }
1476
1477 #ifdef DDB
1478 static int64_t
1479 get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
1480 uint64_t *inuse)
1481 {
1482 const struct malloc_type_stats *mtsp;
1483 uint64_t frees, alloced, freed;
1484 int i;
1485
1486 *allocs = 0;
1487 frees = 0;
1488 alloced = 0;
1489 freed = 0;
1490 for (i = 0; i <= mp_maxid; i++) {
1491 mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
1492
1493 *allocs += mtsp->mts_numallocs;
1494 frees += mtsp->mts_numfrees;
1495 alloced += mtsp->mts_memalloced;
1496 freed += mtsp->mts_memfreed;
1497 }
1498 *inuse = *allocs - frees;
1499 return (alloced - freed);
1500 }
1501
1502 DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
1503 {
1504 const char *fmt_hdr, *fmt_entry;
1505 struct malloc_type *mtp;
1506 uint64_t allocs, inuse;
1507 int64_t size;
1508 /* variables for sorting */
1509 struct malloc_type *last_mtype, *cur_mtype;
1510 int64_t cur_size, last_size;
1511 int ties;
1512
1513 if (modif[0] == 'i') {
1514 fmt_hdr = "%s,%s,%s,%s\n";
1515 fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
1516 } else {
1517 fmt_hdr = "%18s %12s %12s %12s\n";
1518 fmt_entry = "%18s %12ju %12jdK %12ju\n";
1519 }
1520
1521 db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
1522
1523 /* Select sort, largest size first. */
1524 last_mtype = NULL;
1525 last_size = INT64_MAX;
1526 for (;;) {
1527 cur_mtype = NULL;
1528 cur_size = -1;
1529 ties = 0;
1530
1531 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1532 /*
1533 * In the case of size ties, print out mtypes
1534 * in the order they are encountered. That is,
1535 * when we encounter the most recently output
1536 * mtype, we have already printed all preceding
1537 * ties, and we must print all following ties.
1538 */
1539 if (mtp == last_mtype) {
1540 ties = 1;
1541 continue;
1542 }
1543 size = get_malloc_stats(&mtp->ks_mti, &allocs,
1544 &inuse);
1545 if (size > cur_size && size < last_size + ties) {
1546 cur_size = size;
1547 cur_mtype = mtp;
1548 }
1549 }
1550 if (cur_mtype == NULL)
1551 break;
1552
1553 size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
1554 db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
1555 howmany(size, 1024), allocs);
1556
1557 if (db_pager_quit)
1558 break;
1559
1560 last_mtype = cur_mtype;
1561 last_size = cur_size;
1562 }
1563 }
1564
1565 #if MALLOC_DEBUG_MAXZONES > 1
1566 DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1567 {
1568 struct malloc_type_internal *mtip;
1569 struct malloc_type *mtp;
1570 u_int subzone;
1571
1572 if (!have_addr) {
1573 db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1574 return;
1575 }
1576 mtp = (void *)addr;
1577 if (mtp->ks_version != M_VERSION) {
1578 db_printf("Version %lx does not match expected %x\n",
1579 mtp->ks_version, M_VERSION);
1580 return;
1581 }
1582
1583 mtip = &mtp->ks_mti;
1584 subzone = mtip->mti_zone;
1585
1586 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1587 mtip = &mtp->ks_mti;
1588 if (mtip->mti_zone != subzone)
1589 continue;
1590 db_printf("%s\n", mtp->ks_shortdesc);
1591 if (db_pager_quit)
1592 break;
1593 }
1594 }
1595 #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1596 #endif /* DDB */
1597