xref: /freebsd/sys/kern/kern_malloc.c (revision 4cf49a43559ed9fdad601bdcccd2c55963008675)
1 /*
2  * Copyright (c) 1987, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
34  * $FreeBSD$
35  */
36 
37 #include "opt_vm.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/vmmeter.h>
45 #include <sys/lock.h>
46 
47 #include <vm/vm.h>
48 #include <vm/vm_param.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_extern.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_map.h>
53 
54 #if defined(INVARIANTS) && defined(__i386__)
55 #include <machine/cpu.h>
56 #endif
57 
58 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
59 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
60 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
61 
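/*
 * kmeminit() runs once at boot (at SI_SUB_KMEM, below) to size the
 * kmem_map submap and the per-page kmemusage array before anything
 * calls malloc().
 */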
62 static void kmeminit __P((void *));
63 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
64 
65 static MALLOC_DEFINE(M_FREE, "free", "should be on free list");
66 
67 static struct malloc_type *kmemstatistics;
68 static struct kmembuckets bucket[MINBUCKET + 16];
69 static struct kmemusage *kmemusage;
70 static char *kmembase;
71 static char *kmemlimit;
72 static int vm_kmem_size;
73 
74 #ifdef INVARIANTS
75 /*
76  * This structure provides a set of masks to catch unaligned frees.
77  */
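/*
 * Each entry below is (1 << i) - 1, the alignment mask for a 2^i byte
 * chunk; free() uses these masks to reject pointers that do not point
 * at the start of an allocated chunk.
 */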
78 static long addrmask[] = { 0,
79 	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
80 	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
81 	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
82 	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
83 };
84 
85 /*
86  * The WEIRD_ADDR is used as known text to copy into free objects so
87  * that modifications after frees can be detected.
88  */
89 #define WEIRD_ADDR	0xdeadc0de
90 #define MAX_COPY	64
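/* MAX_COPY bounds how much of each free chunk is poisoned and later checked. */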
91 
92 /*
93  * Normally the first word of the structure is used to hold the list
94  * pointer for free objects. However, when running with diagnostics,
95  * we use the third and fourth fields, so as to catch modifications
96  * in the most commonly trashed first two words.
97  */
98 struct freelist {
99 	long	spare0;
100 	struct malloc_type *type;
101 	long	spare1;
102 	caddr_t	next;
103 };
104 #else /* !INVARIANTS */
105 struct freelist {
106 	caddr_t	next;
107 };
108 #endif /* INVARIANTS */
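/*
 * Illustrative sketch (assuming 32-bit longs and pointers, as on i386):
 * with INVARIANTS, a chunk sitting on a bucket free list looks like
 *
 *	word 0	spare0	WEIRD_ADDR poison (used for the quick double-free check)
 *	word 1	type	malloc_type recorded by the most recent free(),
 *			or M_FREE for a chunk that has never been handed out
 *	word 2	spare1	WEIRD_ADDR poison
 *	word 3	next	free-list linkage
 *
 * and the rest of the poisoned region (up to MAX_COPY bytes) is also
 * filled with WEIRD_ADDR, so writes through stale pointers show up
 * when the chunk is handed out again.
 */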
109 
110 /*
111  *	malloc:
112  *
113  *	Allocate a block of memory.
114  *
115  *	If M_NOWAIT is set, this routine will not block and return NULL if
116  *	the allocation fails.
117  *
118  *	If M_ASLEEP is set (M_NOWAIT must also be set), this routine
119  *	will have the side effect of calling asleep() if it returns NULL,
120  *	allowing the parent to await() at some future time.
121  */
122 void *
123 malloc(size, type, flags)
124 	unsigned long size;
125 	struct malloc_type *type;
126 	int flags;
127 {
128 	register struct kmembuckets *kbp;
129 	register struct kmemusage *kup;
130 	register struct freelist *freep;
131 	long indx, npg, allocsize;
132 	int s;
133 	caddr_t va, cp, savedlist;
134 #ifdef INVARIANTS
135 	long *end, *lp;
136 	int copysize;
137 	const char *savedtype;
138 #endif
139 	register struct malloc_type *ksp = type;
140 
141 #if defined(INVARIANTS) && defined(__i386__)
142 	if (flags == M_WAITOK)
143 		KASSERT(intr_nesting_level == 0,
144 		   ("malloc(M_WAITOK) in interrupt context"));
145 #endif
146 	/*
147 	 * Must be at splmem() prior to initializing segment to handle
148 	 * potential initialization race.
149 	 */
150 
151 	s = splmem();
152 
153 	if (type->ks_limit == 0)
154 		malloc_init(type);
155 
156 	indx = BUCKETINDX(size);
157 	kbp = &bucket[indx];
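	/*
	 * Enforce the per-type limit: sleep until this type's usage
	 * drops below ks_limit, or fail immediately for M_NOWAIT
	 * (registering an asleep() wakeup first when M_ASLEEP is also set).
	 */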
158 
159 	while (ksp->ks_memuse >= ksp->ks_limit) {
160 		if (flags & M_ASLEEP) {
161 			if (ksp->ks_limblocks < 65535)
162 				ksp->ks_limblocks++;
163 			asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
164 		}
165 		if (flags & M_NOWAIT) {
166 			splx(s);
167 			return ((void *) NULL);
168 		}
169 		if (ksp->ks_limblocks < 65535)
170 			ksp->ks_limblocks++;
171 		tsleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
172 	}
173 	ksp->ks_size |= 1 << indx;
174 #ifdef INVARIANTS
175 	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
176 #endif
177 	if (kbp->kb_next == NULL) {
178 		kbp->kb_last = NULL;
179 		if (size > MAXALLOCSAVE)
180 			allocsize = roundup(size, PAGE_SIZE);
181 		else
182 			allocsize = 1 << indx;
183 		npg = btoc(allocsize);
184 		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
185 		if (va == NULL) {
186 			splx(s);
187 			return ((void *) NULL);
188 		}
189 		kbp->kb_total += kbp->kb_elmpercl;
190 		kup = btokup(va);
191 		kup->ku_indx = indx;
192 		if (allocsize > MAXALLOCSAVE) {
193 			if (npg > 65535)
194 				panic("malloc: allocation too large");
195 			kup->ku_pagecnt = npg;
196 			ksp->ks_memuse += allocsize;
197 			goto out;
198 		}
199 		kup->ku_freecnt = kbp->kb_elmpercl;
200 		kbp->kb_totalfree += kbp->kb_elmpercl;
201 		/*
202 		 * Just in case we blocked while allocating memory,
203 		 * and someone else also allocated memory for this
204 		 * bucket, don't assume the list is still empty.
205 		 */
206 		savedlist = kbp->kb_next;
207 		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
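		/*
		 * Carve the new pages into allocsize-byte chunks,
		 * working backward from the last chunk toward va and
		 * linking each chunk onto the bucket's free list; the
		 * loop ends once cp reaches the start of the pages.
		 */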
208 		for (;;) {
209 			freep = (struct freelist *)cp;
210 #ifdef INVARIANTS
211 			/*
212 			 * Copy in known text to detect modification
213 			 * after freeing.
214 			 */
215 			end = (long *)&cp[copysize];
216 			for (lp = (long *)cp; lp < end; lp++)
217 				*lp = WEIRD_ADDR;
218 			freep->type = M_FREE;
219 #endif /* INVARIANTS */
220 			if (cp <= va)
221 				break;
222 			cp -= allocsize;
223 			freep->next = cp;
224 		}
225 		freep->next = savedlist;
226 		if (kbp->kb_last == NULL)
227 			kbp->kb_last = (caddr_t)freep;
228 	}
229 	va = kbp->kb_next;
230 	kbp->kb_next = ((struct freelist *)va)->next;
231 #ifdef INVARIANTS
232 	freep = (struct freelist *)va;
233 	savedtype = (const char *) type->ks_shortdesc;
234 #if BYTE_ORDER == BIG_ENDIAN
235 	freep->type = (struct malloc_type *)WEIRD_ADDR >> 16;
236 #endif
237 #if BYTE_ORDER == LITTLE_ENDIAN
238 	freep->type = (struct malloc_type *)WEIRD_ADDR;
239 #endif
240 	if ((intptr_t)(void *)&freep->next & 0x2)
241 		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
242 	else
243 		freep->next = (caddr_t)WEIRD_ADDR;
244 	end = (long *)&va[copysize];
245 	for (lp = (long *)va; lp < end; lp++) {
246 		if (*lp == WEIRD_ADDR)
247 			continue;
248 		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
249 			"Data modified on freelist: word",
250 			(long)(lp - (long *)va), (void *)va, size,
251 			"previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
252 		break;
253 	}
254 	freep->spare0 = 0;
255 #endif /* INVARIANTS */
256 	kup = btokup(va);
257 	if (kup->ku_indx != indx)
258 		panic("malloc: wrong bucket");
259 	if (kup->ku_freecnt == 0)
260 		panic("malloc: lost data");
261 	kup->ku_freecnt--;
262 	kbp->kb_totalfree--;
263 	ksp->ks_memuse += 1 << indx;
264 out:
265 	kbp->kb_calls++;
266 	ksp->ks_inuse++;
267 	ksp->ks_calls++;
268 	if (ksp->ks_memuse > ksp->ks_maxused)
269 		ksp->ks_maxused = ksp->ks_memuse;
270 	splx(s);
271 	return ((void *) va);
272 }
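
/*
 * Usage sketch (illustrative; the softc structure and variable names
 * are assumptions, not code from this file):
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(struct foo_softc), M_DEVBUF, M_NOWAIT);
 *	if (sc == NULL)
 *		return (ENOMEM);
 *	bzero(sc, sizeof(struct foo_softc));
 *	...
 *	free(sc, M_DEVBUF);
 *
 * M_NOWAIT callers must handle a NULL return; callers that may sleep
 * pass M_WAITOK instead.  The type given to free() must match the one
 * used for the allocation, since it is what the usage statistics and
 * limits are charged against.
 */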
273 
274 /*
275  *	free:
276  *
277  *	Free a block of memory allocated by malloc.
278  *
279  *	This routine may not block.
280  */
281 void
282 free(addr, type)
283 	void *addr;
284 	struct malloc_type *type;
285 {
286 	register struct kmembuckets *kbp;
287 	register struct kmemusage *kup;
288 	register struct freelist *freep;
289 	long size;
290 	int s;
291 #ifdef INVARIANTS
292 	struct freelist *fp;
293 	long *end, *lp, alloc, copysize;
294 #endif
295 	register struct malloc_type *ksp = type;
296 
297 	if (type->ks_limit == 0)
298 		panic("freeing with unknown type (%s)", type->ks_shortdesc);
299 
300 	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
301 	    ("free: address %p out of range", (void *)addr));
302 	kup = btokup(addr);
303 	size = 1 << kup->ku_indx;
304 	kbp = &bucket[kup->ku_indx];
305 	s = splmem();
306 #ifdef INVARIANTS
307 	/*
308 	 * Check for returns of data that do not point to the
309 	 * beginning of the allocation.
310 	 */
311 	if (size > PAGE_SIZE)
312 		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
313 	else
314 		alloc = addrmask[kup->ku_indx];
315 	if (((uintptr_t)(void *)addr & alloc) != 0)
316 		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
317 		    (void *)addr, size, type->ks_shortdesc, alloc);
318 #endif /* INVARIANTS */
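	/*
	 * Allocations larger than MAXALLOCSAVE bypassed the buckets and
	 * came straight from kmem_map, so hand the whole pages back to
	 * the map rather than caching them on a free list.
	 */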
319 	if (size > MAXALLOCSAVE) {
320 		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
321 		size = kup->ku_pagecnt << PAGE_SHIFT;
322 		ksp->ks_memuse -= size;
323 		kup->ku_indx = 0;
324 		kup->ku_pagecnt = 0;
325 		if (ksp->ks_memuse + size >= ksp->ks_limit &&
326 		    ksp->ks_memuse < ksp->ks_limit)
327 			wakeup((caddr_t)ksp);
328 		ksp->ks_inuse--;
329 		kbp->kb_total -= 1;
330 		splx(s);
331 		return;
332 	}
333 	freep = (struct freelist *)addr;
334 #ifdef INVARIANTS
335 	/*
336 	 * Check for multiple frees. Use a quick check to see if
337 	 * it looks free before laboriously searching the freelist.
338 	 */
339 	if (freep->spare0 == WEIRD_ADDR) {
340 		fp = (struct freelist *)kbp->kb_next;
341 		while (fp) {
342 			if (fp->spare0 != WEIRD_ADDR)
343 				panic("free: free item %p modified", fp);
344 			else if (addr == (caddr_t)fp)
345 				panic("free: multiple freed item %p", addr);
346 			fp = (struct freelist *)fp->next;
347 		}
348 	}
349 	/*
350 	 * Copy in known text to detect modification after freeing
351 	 * and to make it look free. Also, save the type being freed
352 	 * so we can list likely culprit if modification is detected
353 	 * when the object is reallocated.
354 	 */
355 	copysize = size < MAX_COPY ? size : MAX_COPY;
356 	end = (long *)&((caddr_t)addr)[copysize];
357 	for (lp = (long *)addr; lp < end; lp++)
358 		*lp = WEIRD_ADDR;
359 	freep->type = type;
360 #endif /* INVARIANTS */
361 	kup->ku_freecnt++;
362 	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
363 		if (kup->ku_freecnt > kbp->kb_elmpercl)
364 			panic("free: multiple frees");
365 		else if (kbp->kb_totalfree > kbp->kb_highwat)
366 			kbp->kb_couldfree++;
367 	}
368 	kbp->kb_totalfree++;
369 	ksp->ks_memuse -= size;
370 	if (ksp->ks_memuse + size >= ksp->ks_limit &&
371 	    ksp->ks_memuse < ksp->ks_limit)
372 		wakeup((caddr_t)ksp);
373 	ksp->ks_inuse--;
374 #ifdef OLD_MALLOC_MEMORY_POLICY
375 	if (kbp->kb_next == NULL)
376 		kbp->kb_next = addr;
377 	else
378 		((struct freelist *)kbp->kb_last)->next = addr;
379 	freep->next = NULL;
380 	kbp->kb_last = addr;
381 #else
382 	/*
383 	 * Return memory to the head of the queue for quick reuse.  This
384 	 * can improve performance by improving the probability of the
385 	 * item being in the cache when it is reused.
386 	 */
387 	if (kbp->kb_next == NULL) {
388 		kbp->kb_next = addr;
389 		kbp->kb_last = addr;
390 		freep->next = NULL;
391 	} else {
392 		freep->next = kbp->kb_next;
393 		kbp->kb_next = addr;
394 	}
395 #endif
396 	splx(s);
397 }
398 
399 /*
400  * Initialize the kernel memory allocator
401  */
402 /* ARGSUSED*/
403 static void
404 kmeminit(dummy)
405 	void *dummy;
406 {
407 	register long indx;
408 	int npg;
409 	int mem_size;
410 	int xvm_kmem_size;
411 
412 #if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
413 #error "kmeminit: MAXALLOCSAVE not power of 2"
414 #endif
415 #if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
416 #error "kmeminit: MAXALLOCSAVE too big"
417 #endif
418 #if	(MAXALLOCSAVE < PAGE_SIZE)
419 #error "kmeminit: MAXALLOCSAVE too small"
420 #endif
421 
422 	/*
423 	 * Try to auto-tune the kernel memory size, so that it is
424 	 * more applicable for a wider range of machine sizes.
425 	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
426 	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
427 	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
428 	 * available, and on an X86 with a total KVA space of 256MB,
429 	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
430 	 *
431 	 * Note that the kmem_map is also used by the zone allocator,
432 	 * so make sure that there is enough space.
433 	 */
434 	xvm_kmem_size = VM_KMEM_SIZE;
435 	mem_size = cnt.v_page_count * PAGE_SIZE;
436 
437 #if defined(VM_KMEM_SIZE_SCALE)
438 	if ((mem_size / VM_KMEM_SIZE_SCALE) > xvm_kmem_size)
439 		xvm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
440 #endif
441 
442 #if defined(VM_KMEM_SIZE_MAX)
443 	if (xvm_kmem_size >= VM_KMEM_SIZE_MAX)
444 		xvm_kmem_size = VM_KMEM_SIZE_MAX;
445 #endif
446 
447 	/* Allow final override from the kernel environment */
448 	TUNABLE_INT_FETCH("kern.vm.kmem.size", xvm_kmem_size, vm_kmem_size);
449 
450 	if (vm_kmem_size > 2 * (cnt.v_page_count * PAGE_SIZE))
451 		vm_kmem_size = 2 * (cnt.v_page_count * PAGE_SIZE);
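	/*
	 * Worked example using the figures from the comment above: on a
	 * 128MB i386 with VM_KMEM_SIZE_SCALE == 4, mem_size / 4 == 32MB
	 * exceeds the 12MB VM_KMEM_SIZE default, so xvm_kmem_size becomes
	 * 32MB; that is below an 80MB VM_KMEM_SIZE_MAX and below twice
	 * physical memory, so vm_kmem_size ends up at 32MB unless the
	 * kern.vm.kmem.size loader tunable overrides it.
	 */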
452 
453 	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
454 		/ PAGE_SIZE;
455 
456 	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
457 		(vm_size_t)(npg * sizeof(struct kmemusage)));
458 	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
459 		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
460 	kmem_map->system_map = 1;
461 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
462 		if (1 << indx >= PAGE_SIZE)
463 			bucket[indx].kb_elmpercl = 1;
464 		else
465 			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
466 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
467 	}
468 }
469 
470 void
471 malloc_init(data)
472 	void *data;
473 {
474 	struct malloc_type *type = (struct malloc_type *)data;
475 
476 	if (type->ks_magic != M_MAGIC)
477 		panic("malloc type lacks magic");
478 
479 	if (type->ks_limit != 0)
480 		return;
481 
482 	if (cnt.v_page_count == 0)
483 		panic("malloc_init not allowed before vm init");
484 
485 	/*
486 	 * The default limits for each malloc region is 1/2 of the
487 	 * malloc portion of the kmem map size.
488 	 */
489 	type->ks_limit = vm_kmem_size / 2;
490 	type->ks_next = kmemstatistics;
491 	kmemstatistics = type;
492 }
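
/*
 * A malloc_type counts as registered once ks_limit is non-zero;
 * malloc() initializes unregistered types lazily (it calls
 * malloc_init() when it sees ks_limit == 0), so every type that has
 * been used appears on the kmemstatistics list.
 */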
493 
494 void
495 malloc_uninit(data)
496 	void *data;
497 {
498 	struct malloc_type *type = (struct malloc_type *)data;
499 	struct malloc_type *t;
500 
501 	if (type->ks_magic != M_MAGIC)
502 		panic("malloc type lacks magic");
503 
504 	if (cnt.v_page_count == 0)
505 		panic("malloc_uninit not allowed before vm init");
506 
507 	if (type->ks_limit == 0)
508 		panic("malloc_uninit on uninitialized type");
509 
510 	if (type == kmemstatistics)
511 		kmemstatistics = type->ks_next;
512 	else {
513 		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
514 			if (t->ks_next == type) {
515 				t->ks_next = type->ks_next;
516 				break;
517 			}
518 		}
519 	}
520 	type->ks_next = NULL;
521 	type->ks_limit = 0;
522 }
523