xref: /freebsd/sys/kern/kern_malloc.c (revision a8445737e740901f5f2c8d24c12ef7fc8b00134e)
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $Id: kern_malloc.c,v 1.46 1998/07/29 17:38:14 bde Exp $
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#define MALLOC_INSTANTIATE
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static void kmeminit __P((void *));
static void malloc_init __P((struct malloc_type *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics = M_FREE;
static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;
static int vm_kmem_size;

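/*
 * In outline, the allocator works as follows: requests up to MAXALLOCSAVE
 * bytes are rounded up to a power of two and served from per-size freelists
 * hung off bucket[], which covers sizes from 1 << MINBUCKET up to
 * MAXALLOCSAVE.  Larger requests go straight to kmem_malloc() a cluster of
 * pages at a time.  Every page of the kmem submap has a kmemusage record
 * (found via btokup()) remembering which bucket the page was carved up for
 * and how many of its elements are free, or, for large allocations, how
 * many pages the allocation spans.  Per-type statistics are kept in the
 * malloc_type structures chained through kmemstatistics.
 */
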
#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	struct malloc_type *type;
	long	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	long *end, *lp;
	int copysize;
	char *savedtype;
#endif
	register struct malloc_type *ksp = type;

	if (!type->ks_next)
		malloc_init(type);

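	/*
	 * BUCKETINDX() below yields the index of the smallest power-of-two
	 * bucket that can hold the request.  For example (assuming the
	 * usual MINBUCKET of 4, i.e. a 16 byte minimum allocation), a 100
	 * byte request maps to indx 7 and is served from the 128 byte
	 * bucket.
	 */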
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splmem();
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
	}
	ksp->ks_size |= 1 << indx;
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
		kbp->kb_total += kbp->kb_elmpercl;
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
			ksp->ks_memuse += allocsize;
			goto out;
		}
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
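		/*
		 * Carve the new cluster into allocsize-sized pieces, starting
		 * with the piece at the top of the cluster and walking down,
		 * linking each piece to the one below it.  The loop exits
		 * once cp has reached the base of the cluster (va), and the
		 * bottom piece is then chained onto whatever was already on
		 * the list (savedlist).
		 */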
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
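	/*
	 * Take the first element off the bucket's freelist.  Under
	 * DIAGNOSTIC, overwrite the list linkage and rescan the poisoned
	 * area so that use-after-free damage is reported before the memory
	 * is handed back out.
	 */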
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (char *) type->ks_shortdesc;
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = (struct malloc_type *)(WEIRD_ADDR >> 16);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (struct malloc_type *)WEIRD_ADDR;
#endif
	if ((intptr_t)(void *)&freep->next & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
			"Data modified on freelist: word",
			(long)(lp - (long *)va), (void *)va, size,
			"previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
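	/*
	 * Account for the element: one fewer free element on its page and
	 * in the bucket as a whole, and 1 << indx more bytes charged to the
	 * requesting type.  The page's kmemusage record is cross-checked
	 * against the bucket index as a cheap consistency test.
	 */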
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
	splx(s);
	return ((void *) va);
}

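/*
 * Typical use, sketched here for illustration only (M_FOO and struct foo
 * are hypothetical; real callers declare their own type with
 * MALLOC_DEFINE()):
 *
 *	static MALLOC_DEFINE(M_FOO, "foo", "example allocations");
 *	...
 *	struct foo *fp;
 *
 *	fp = (struct foo *)malloc(sizeof(*fp), M_FOO, M_NOWAIT);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	free(fp, M_FOO);
 *
 * With M_NOWAIT, malloc() returns NULL instead of sleeping when the type
 * is over its ks_limit or when kmem_malloc() cannot satisfy the request;
 * callers that may sleep pass M_WAITOK instead.
 */
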
/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
	register struct malloc_type *ksp = type;

	if (!type->ks_next)
		panic("freeing with unknown type (%s)", type->ks_shortdesc);

#ifdef DIAGNOSTIC
	if ((char *)addr < kmembase || (char *)addr >= kmemlimit) {
		panic("free: address %p out of range", (void *)addr);
	}
#endif
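	/*
	 * btokup() maps the address back to the per-page kmemusage record,
	 * which remembers the bucket index the enclosing page was allocated
	 * under; that index in turn gives the element size and the bucket
	 * the element is returned to.
	 */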
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splmem();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((uintptr_t)(void *)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    (void *)addr, size, type->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR) {
				printf("trashed free item %p\n", fp);
				panic("free: free item modified");
			} else if (addr == (caddr_t)fp) {
				printf("multiple freed item %p\n", addr);
				panic("free: multiple free");
			}
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
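	/*
	 * A page can never have more free elements than were carved out of
	 * it (kb_elmpercl); if the count is about to exceed that, the same
	 * element must have been freed twice.  When every element of the
	 * page is free and the bucket already holds more than its high
	 * water mark of free elements, note that the page could be
	 * released (kb_couldfree).
	 */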
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#ifdef OLD_MALLOC_MEMORY_POLICY
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse.  This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	register long indx;
	int npg;
	int mem_size;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if	(MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
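	/*
	 * Worked example, using the illustrative figures above: with a
	 * VM_KMEM_SIZE of 12MB, a VM_KMEM_SIZE_SCALE of 4 and 64MB of
	 * physical memory, mem_size / 4 is 16MB, which exceeds 12MB, so
	 * vm_kmem_size becomes 16MB; it is then clamped by VM_KMEM_SIZE_MAX
	 * and, as a final sanity check, by twice the physical memory size.
	 */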
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	if (vm_kmem_size > 2 * (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * (cnt.v_page_count * PAGE_SIZE);

	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
		/ PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;
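	/*
	 * Set up the bucket descriptors: buckets of a page or more hold one
	 * element per cluster, smaller buckets hold PAGE_SIZE / element-size
	 * elements, and each bucket's high water mark is five clusters'
	 * worth of free elements.
	 */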
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
}

static void
malloc_init(type)
	struct malloc_type *type;
{

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/*
	 * The default limit for each malloc type is 1/2 of the
	 * malloc portion of the kmem map size.
	 */
	type->ks_limit = vm_kmem_size / 2;
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}