/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $Id: kern_malloc.c,v 1.53 1999/01/21 21:54:32 msmith Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#define MALLOC_INSTANTIATE
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static void kmeminit __P((void *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");
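
/*
 * Illustrative sketch (not part of this file): subsystems declare their
 * own statistics-gathering malloc types the same way, e.g.
 *
 *	MALLOC_DEFINE(M_MYDEV, "mydev", "mydev driver structures");
 *
 * where M_MYDEV is a hypothetical type name; a matching
 * MALLOC_DECLARE(M_MYDEV) in a shared header exports the type to
 * other files.
 */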

static struct malloc_type *kmemstatistics;
static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;
static int vm_kmem_size;

#ifdef INVARIANTS
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64
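
/*
 * Illustrative sketch (not part of the allocator): after free(p, type),
 * the first MAX_COPY bytes of the object are filled with WEIRD_ADDR
 * words, so that
 *
 *	((long *)p)[0] == 0xdeadc0de
 *
 * holds until something writes through a stale pointer.  When malloc()
 * later hands the object out again, it rescans those words and prints
 * "Data modified on freelist: ..." naming the previous type as the
 * likely culprit if any word changed.
 */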

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	struct malloc_type *type;
	long	spare1;
	caddr_t	next;
};
#else /* !INVARIANTS */
struct freelist {
	caddr_t	next;
};
#endif /* INVARIANTS */

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 *
 *	If M_ASLEEP is set (M_NOWAIT must also be set), this routine
 *	will have the side effect of calling asleep() if it returns NULL,
 *	allowing the parent to await() at some future time.
 */
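
/*
 * Example use (a sketch, not from this file; struct foo is hypothetical,
 * M_TEMP is the generic temporary-storage type from <sys/malloc.h>):
 *
 *	struct foo *fp;
 *
 *	fp = (struct foo *)malloc(sizeof(*fp), M_TEMP, M_NOWAIT);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	free(fp, M_TEMP);
 *
 * Passing M_WAITOK instead lets the allocation tsleep() until the
 * type's ks_limit permits it, so it may not be used from interrupt
 * context.
 */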
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef INVARIANTS
	long *end, *lp;
	int copysize;
	const char *savedtype;
#endif
	register struct malloc_type *ksp = type;

	/*
	 * Must be at splmem() prior to initializing segment to handle
	 * potential initialization race.
	 */

	s = splmem();

	if (!type->ks_next) {
		malloc_init(type);
	}

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];

	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_ASLEEP) {
			if (ksp->ks_limblocks < 65535)
				ksp->ks_limblocks++;
			asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
		}
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
	}
	ksp->ks_size |= 1 << indx;
#ifdef INVARIANTS
	copysize = (1 << indx) < MAX_COPY ? (1 << indx) : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
		kbp->kb_total += kbp->kb_elmpercl;
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
			ksp->ks_memuse += allocsize;
			goto out;
		}
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef INVARIANTS
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* INVARIANTS */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef INVARIANTS
	freep = (struct freelist *)va;
	savedtype = (const char *) type->ks_shortdesc;
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = (struct malloc_type *)(WEIRD_ADDR >> 16);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (struct malloc_type *)WEIRD_ADDR;
#endif
	if ((intptr_t)(void *)&freep->next & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
			"Data modified on freelist: word",
			(long)(lp - (long *)va), (void *)va, size,
			"previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* INVARIANTS */
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
	splx(s);
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
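
/*
 * Example use (a sketch, not from this file): the type passed to free()
 * must match the type the block was allocated with, since statistics
 * and the ks_limit bookkeeping are kept per malloc type:
 *
 *	p = malloc(len, M_TEMP, M_WAITOK);
 *	...
 *	free(p, M_TEMP);
 *
 * Freeing with a type whose ks_next is still NULL panics with
 * "freeing with unknown type".
 */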
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef INVARIANTS
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
	register struct malloc_type *ksp = type;

	if (!type->ks_next)
		panic("freeing with unknown type (%s)", type->ks_shortdesc);

	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
	    ("free: address %p out of range", (void *)addr));
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splmem();
#ifdef INVARIANTS
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((uintptr_t)(void *)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    (void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef INVARIANTS
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR)
				panic("free: free item %p modified", fp);
			else if (addr == (caddr_t)fp)
				panic("free: multiple freed item %p", addr);
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* INVARIANTS */
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#ifdef OLD_MALLOC_MEMORY_POLICY
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse.  This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	register long indx;
	int npg;
	int mem_size;
	int xvm_kmem_size;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if	(MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
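	/*
	 * Worked example (illustrative, assuming VM_KMEM_SIZE_SCALE is 4
	 * and VM_KMEM_SIZE is 12MB, as the comment above suggests): a
	 * 128MB machine yields mem_size / 4 = 32MB > 12MB, so
	 * vm_kmem_size becomes 32MB; a 32MB machine yields 8MB < 12MB,
	 * so the 12MB default stands.  Either result is then clipped by
	 * VM_KMEM_SIZE_MAX, subject to the kern.vm.kmem.size environment
	 * override, and finally capped at twice physical memory below.
	 */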
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	if (getenv_int("kern.vm.kmem.size", &xvm_kmem_size))
		vm_kmem_size = xvm_kmem_size;

	if (vm_kmem_size > 2 * (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * (cnt.v_page_count * PAGE_SIZE);

	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
		/ PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_next)
		return;

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/*
	 * The default limit for each malloc region is 1/2 of the
	 * malloc portion of the kmem map size.
	 */
	type->ks_limit = vm_kmem_size / 2;
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
}