/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit __P((void *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;

MUTEX_DECLARE(static, malloc_mtx);

u_int vm_kmem_size;

#ifdef INVARIANTS
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
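
/*
 * Illustrative example (not part of the original source): assuming the
 * historical i386 value MINBUCKET == 4, the 16-byte bucket has index
 * BUCKETINDX(16) == 4 and mask addrmask[4] == 0x0000000f, so a pointer
 * such as (base + 8), which does not point to the start of a 16-byte
 * chunk, is caught by the alignment check that free() performs:
 *
 *	if (((uintptr_t)addr & addrmask[kup->ku_indx]) != 0)
 *		panic("free: unaligned addr");
 */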

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	struct malloc_type *type;
	long	spare1;
	caddr_t	next;
};
#else /* !INVARIANTS */
struct freelist {
	caddr_t	next;
};
#endif /* INVARIANTS */

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block; it returns NULL
 *	if the allocation fails.
 *
 *	If M_ASLEEP is set (M_NOWAIT must also be set), this routine
 *	will have the side effect of calling asleep() if it returns NULL,
 *	allowing the parent to await() at some future time.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef INVARIANTS
	long *end, *lp;
	int copysize;
	const char *savedtype;
#endif
	register struct malloc_type *ksp = type;

#if defined(INVARIANTS) && defined(__i386__)
	if (flags == M_WAITOK)
		KASSERT(intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splmem();
	mtx_enter(&malloc_mtx, MTX_DEF);
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_ASLEEP) {
			if (ksp->ks_limblocks < 65535)
				ksp->ks_limblocks++;
			asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
		}
		if (flags & M_NOWAIT) {
			splx(s);
			mtx_exit(&malloc_mtx, MTX_DEF);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		msleep((caddr_t)ksp, &malloc_mtx, PSWP+2, type->ks_shortdesc,
		    0);
	}
	ksp->ks_size |= 1 << indx;
#ifdef INVARIANTS
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);

		mtx_exit(&malloc_mtx, MTX_DEF);
		mtx_enter(&Giant, MTX_DEF);
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
		    flags);
		mtx_exit(&Giant, MTX_DEF);

		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
		/*
		 * Enter malloc_mtx after the error check to avoid having to
		 * immediately exit it again if there is an error.
		 */
		mtx_enter(&malloc_mtx, MTX_DEF);

		kbp->kb_total += kbp->kb_elmpercl;
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
			ksp->ks_memuse += allocsize;
			goto out;
		}
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef INVARIANTS
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* INVARIANTS */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef INVARIANTS
	freep = (struct freelist *)va;
	savedtype = (const char *) freep->type->ks_shortdesc;
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = (struct malloc_type *)(WEIRD_ADDR >> 16);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (struct malloc_type *)WEIRD_ADDR;
#endif
	if ((intptr_t)(void *)&freep->next & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
			"Data modified on freelist: word",
			(long)(lp - (long *)va), (void *)va, size,
			"previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* INVARIANTS */
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
	splx(s);
	mtx_exit(&malloc_mtx, MTX_DEF);
	/* XXX: Do idle pre-zeroing.  */
	if (va != NULL && (flags & M_ZERO))
		bzero(va, size);
	return ((void *) va);
}
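
/*
 * Usage sketch (illustrative, not part of this file): a caller that
 * cannot sleep passes M_NOWAIT and must handle NULL; M_ZERO asks for
 * the returned block to be cleared, and free() must name the same
 * malloc type.  "struct foo" is hypothetical:
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	free(fp, M_DEVBUF);
 */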

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef INVARIANTS
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
	register struct malloc_type *ksp = type;

	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
	    ("free: address %p out of range", (void *)addr));
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splmem();
	mtx_enter(&malloc_mtx, MTX_DEF);
#ifdef INVARIANTS
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((uintptr_t)(void *)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    (void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
	if (size > MAXALLOCSAVE) {
		mtx_exit(&malloc_mtx, MTX_DEF);
		mtx_enter(&Giant, MTX_DEF);
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
		mtx_exit(&Giant, MTX_DEF);
		mtx_enter(&malloc_mtx, MTX_DEF);

		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		splx(s);
		mtx_exit(&malloc_mtx, MTX_DEF);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef INVARIANTS
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR)
				panic("free: free item %p modified", fp);
			else if (addr == (caddr_t)fp)
				panic("free: multiple freed item %p", addr);
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* INVARIANTS */
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#ifdef OLD_MALLOC_MEMORY_POLICY
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse.  This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
	splx(s);
	mtx_exit(&malloc_mtx, MTX_DEF);
}
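
/*
 * Illustrative note (not in the original source): with INVARIANTS, a
 * double free of the same small allocation is typically caught either
 * by the freelist walk above or by the per-page free count, e.g.:
 *
 *	p = malloc(32, M_TEMP, M_NOWAIT);
 *	free(p, M_TEMP);
 *	free(p, M_TEMP);	panics "free: multiple freed item %p"
 */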

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	register long indx;
	u_long npg;
	u_long mem_size;
	u_long xvm_kmem_size;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if	(MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif

	mtx_init(&malloc_mtx, "malloc", MTX_DEF | MTX_COLD);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * suitable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	xvm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > xvm_kmem_size)
		xvm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (xvm_kmem_size >= VM_KMEM_SIZE_MAX)
		xvm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", xvm_kmem_size, vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
		/ PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
}
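
/*
 * Worked example (illustrative, with assumed values): on a machine with
 * 256MB of RAM (65536 4KB pages) and an assumed VM_KMEM_SIZE_SCALE of 3,
 * mem_size / 3 is about 85MB, which exceeds the 12MB VM_KMEM_SIZE
 * default, so xvm_kmem_size grows to ~85MB and is then clamped to
 * VM_KMEM_SIZE_MAX if that is defined smaller.
 */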

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/*
	 * The default limit for each malloc region is 1/2 of the
	 * malloc portion of the kmem map size.
	 */
	type->ks_limit = vm_kmem_size / 2;
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}
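
/*
 * Usage sketch (illustrative): a subsystem declares its own malloc type
 * with MALLOC_DEFINE(), as done for M_CACHE et al. above; the macro
 * arranges for malloc_init() to run via SYSINIT, after which the type
 * may be passed to malloc() and free().  M_FOOBUF is hypothetical:
 *
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "example buffers");
 */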

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;
#ifdef INVARIANTS
	struct kmembuckets *kbp;
	struct freelist *freep;
	long indx;
	int s;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
	s = splmem();
	mtx_enter(&malloc_mtx, MTX_DEF);
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		kbp = bucket + indx;
		freep = (struct freelist*)kbp->kb_next;
		while (freep) {
			if (freep->type == type)
				freep->type = M_FREE;
			freep = (struct freelist*)freep->next;
		}
	}
	splx(s);
	mtx_exit(&malloc_mtx, MTX_DEF);

	if (type->ks_memuse != 0)
		printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
		    type->ks_memuse, type->ks_shortdesc);
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
}
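
/*
 * Illustrative note (not in the original source): malloc_uninit() is the
 * teardown counterpart of malloc_init(), intended to run when a module
 * that defined a malloc type is unloaded; allocations of that type that
 * were never freed are reported by the INVARIANTS block above.
 */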
577