/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
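/*
 * Illustrative example: with the default REALLOC_FRACTION of 1, shrinking
 * a 1024-byte allocation to 512 bytes or less makes realloc() allocate a
 * fresh, smaller block and copy the data over; shrinking it to anything in
 * the 513..1024 range keeps the original block.
 */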

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
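/*
 * Requests up to KMEM_ZMAX bytes are rounded up to a multiple of KMEM_ZBASE
 * (16), and the rounded size, shifted right by KMEM_ZSHIFT, indexes
 * kmemsize[] to find the power-of-two zone in kmemzones[] that services the
 * request.  For example, a 100-byte request is rounded up to 112, and
 * kmemsize[112 >> KMEM_ZSHIFT] selects the 128-byte zone.  Anything larger
 * than KMEM_ZMAX bypasses the zones and goes to uma_large_malloc().
 */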

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
	{8192, "8192", NULL},
	{16384, "16384", NULL},
	{32768, "32768", NULL},
	{65536, "65536", NULL},
	{0, NULL},
};

u_int vm_kmem_size;

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
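/*
 * Typical usage (illustrative only; "sc" and "len" are hypothetical):
 *
 *	sc = malloc(len, M_DEVBUF, M_NOWAIT);
 *	if (sc == NULL)
 *		return (ENOMEM);
 */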
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	register struct malloc_type *ksp = type;

#if 0
	if (size == 0)
		Debugger("zero size malloc");
#endif
	if (!(flags & M_NOWAIT))
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc() without M_NOWAIT in interrupt context"));
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;

		ksp->ks_size |= 1 << indx;
		size = zone->uz_size;
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;
	}
	ksp->ks_memuse += size;
	ksp->ks_inuse++;
out:
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;

	mtx_unlock(&ksp->ks_mtx);
	if (!(flags & M_NOWAIT))
		KASSERT(va != NULL, ("malloc() without M_NOWAIT returned NULL"));
	if (va == NULL) {
		t_malloc_fail = time_uptime;
	}
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
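/*
 * The slab header looked up via vtoslab() tells us whether the block came
 * from one of the power-of-two zones or from uma_large_malloc(), and how
 * much memory to credit back to the type's statistics.
 */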
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct malloc_type *ksp = type;
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_zone->uz_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(slab->us_zone, addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 *	realloc: change the size of a memory block
 */
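/*
 * Note that if allocating the new block fails, the original block is left
 * untouched and NULL is returned; callers that want the old block released
 * on failure should use reallocf() below.
 */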
void *
realloc(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_zone)
		alloc = slab->us_zone->uz_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
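/*
 * Typical usage (illustrative only; "buf" and "newlen" are hypothetical):
 *
 *	buf = reallocf(buf, newlen, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 */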
void *
reallocf(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	u_int8_t indx;
	u_long npg;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif
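	/*
	 * Illustrative example: with VM_KMEM_SIZE_SCALE defined as 4 on a
	 * machine with 256MB of RAM, mem_size / 4 is 64MB, so vm_kmem_size
	 * grows from the 12MB default to 64MB here (subject to the
	 * VM_KMEM_SIZE_MAX cap below).
	 */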

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
	 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
	 * respectively. Mathematically, this means that what we do here may
	 * amount to slightly more address space than we need for the submaps,
	 * but it never hurts to have an extra page in kmem_map.
	 */
	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt *
	    sizeof(u_int) + vm_kmem_size) / PAGE_SIZE;

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;

	uma_startup2();

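	/*
	 * Create the power-of-two zones and fill in kmemsize[] so that every
	 * 16-byte step up to KMEM_ZMAX maps to the index of the smallest zone
	 * that can hold a request of that size.
	 */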
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/* Already initialized; don't return with malloc_mtx held. */
	if (type->ks_next != NULL) {
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

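/*
 * kern.malloc sysctl handler: walk the kmemstatistics list and emit one
 * formatted line per malloc type, followed by the list of size classes the
 * type has used (recorded in ks_size as a bitmask of kmemzones[] indices).
 */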
static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(long long unsigned)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

#ifdef MALLOC_PROFILE

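/*
 * kern.mprof sysctl handler: report, for each 16-byte size step, how many
 * requests were seen (krequests[]) and the real size of the zone that
 * served them, plus totals for memory used and memory wasted by rounding.
 */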
static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;	/* For the stats line */
	bufsize += 128;	/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */