/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit __P((void *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;

static struct mtx malloc_mtx;

u_int vm_kmem_size;
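
/*
 * Illustrative sketch (not part of this file): a typical consumer of this
 * allocator declares its own statistics type with MALLOC_DEFINE() and then
 * passes that type to both malloc() and free().  M_FOODEV and struct
 * foo_softc below are hypothetical names used only for the example.
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo device structures");
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOODEV);
 *
 * The per-type accounting kept below (and reported by vmstat -m) is keyed
 * on the malloc_type passed in, so the same type must be used for the
 * allocation and the corresponding free.
 */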

#ifdef INVARIANTS
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	struct malloc_type *type;
	long	spare1;
	caddr_t	next;
};
#else /* !INVARIANTS */
struct freelist {
	caddr_t	next;
};
#endif /* INVARIANTS */

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 *
 *	If M_ASLEEP is set (M_NOWAIT must also be set), this routine
 *	will have the side effect of calling asleep() if it returns NULL,
 *	allowing the parent to await() at some future time.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef INVARIANTS
	long *end, *lp;
	int copysize;
	const char *savedtype;
#endif
	register struct malloc_type *ksp = type;

#if defined(INVARIANTS)
	if (flags == M_WAITOK)
		KASSERT(curproc->p_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splmem();
	mtx_lock(&malloc_mtx);
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_ASLEEP) {
			if (ksp->ks_limblocks < 65535)
				ksp->ks_limblocks++;
			asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
		}
		if (flags & M_NOWAIT) {
			splx(s);
			mtx_unlock(&malloc_mtx);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		msleep((caddr_t)ksp, &malloc_mtx, PSWP+2, type->ks_shortdesc,
		    0);
	}
	ksp->ks_size |= 1 << indx;
#ifdef INVARIANTS
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);

		mtx_unlock(&malloc_mtx);
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);

		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
		/*
		 * Enter malloc_mtx after the error check to avoid having to
		 * immediately exit it again if there is an error.
		 */
		mtx_lock(&malloc_mtx);

		kbp->kb_total += kbp->kb_elmpercl;
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
			ksp->ks_memuse += allocsize;
			goto out;
		}
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef INVARIANTS
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* INVARIANTS */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef INVARIANTS
	freep = (struct freelist *)va;
	savedtype = (const char *) freep->type->ks_shortdesc;
	freep->type = (struct malloc_type *)WEIRD_ADDR;
	if ((intptr_t)(void *)&freep->next & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (long *)va), (void *)va, size,
		    "previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* INVARIANTS */
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
	splx(s);
	mtx_unlock(&malloc_mtx);
	/* XXX: Do idle pre-zeroing. */
	if (va != NULL && (flags & M_ZERO))
		bzero(va, size);
	return ((void *) va);
}
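
/*
 * Caller-side sketch of the flag handling above (illustrative only;
 * M_MYDATA, p and len are hypothetical names, not defined in this file):
 *
 *	From interrupt context, or while holding locks that must not be
 *	held across a sleep, pass M_NOWAIT and check for failure:
 *
 *		p = malloc(len, M_MYDATA, M_NOWAIT);
 *		if (p == NULL)
 *			return (ENOMEM);
 *
 *	From process context, M_WAITOK allows the thread to sleep in the
 *	limit loop above until memory accounted to the type is released.
 *	Adding M_ZERO returns the memory already cleared, as done at the
 *	end of malloc() above.
 */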

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef INVARIANTS
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
	register struct malloc_type *ksp = type;

	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
	    ("free: address %p out of range", (void *)addr));
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splmem();
	mtx_lock(&malloc_mtx);
#ifdef INVARIANTS
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((uintptr_t)(void *)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    (void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
	if (size > MAXALLOCSAVE) {
		mtx_unlock(&malloc_mtx);
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
		mtx_lock(&malloc_mtx);

		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		splx(s);
		mtx_unlock(&malloc_mtx);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef INVARIANTS
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR)
				panic("free: free item %p modified", fp);
			else if (addr == (caddr_t)fp)
				panic("free: multiple freed item %p", addr);
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* INVARIANTS */
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#ifdef OLD_MALLOC_MEMORY_POLICY
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse. This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
	splx(s);
	mtx_unlock(&malloc_mtx);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	register long indx;
	u_long npg;
	u_long mem_size;

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if (MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif

	mtx_init(&malloc_mtx, "malloc", MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise. The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
	 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
	 * respectively. Mathematically, this means that what we do here may
	 * amount to slightly more address space than we need for the submaps,
	 * but it never hurts to have an extra page in kmem_map.
	 */
	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt *
	    sizeof(u_int) + vm_kmem_size) / PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
	    (vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
}
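
/*
 * Worked example of the sizing logic above (the numbers are illustrative,
 * not defaults): with 256MB of physical memory (mem_size = 256MB) and
 * VM_KMEM_SIZE_SCALE defined as 4, vm_kmem_size grows from VM_KMEM_SIZE to
 * 256MB / 4 = 64MB, and is then clipped to VM_KMEM_SIZE_MAX if that is
 * defined and smaller.  The kern.vm.kmem.size environment tunable can
 * override the result, and the final value is capped at twice the size of
 * physical memory.
 */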

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/*
	 * The default limit for each malloc region is 1/2 of the
	 * malloc portion of the kmem map size.
	 */
	type->ks_limit = vm_kmem_size / 2;
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;
#ifdef INVARIANTS
	struct kmembuckets *kbp;
	struct freelist *freep;
	long indx;
	int s;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
	s = splmem();
	mtx_lock(&malloc_mtx);
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		kbp = bucket + indx;
		freep = (struct freelist*)kbp->kb_next;
		while (freep) {
			if (freep->type == type)
				freep->type = M_FREE;
			freep = (struct freelist*)freep->next;
		}
	}
	splx(s);
	mtx_unlock(&malloc_mtx);

	if (type->ks_memuse != 0)
		printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
		    type->ks_memuse, type->ks_shortdesc);
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
}
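
/*
 * Illustrative note (not part of this file): malloc_init() and
 * malloc_uninit() are not normally called by hand.  A subsystem defines its
 * type with MALLOC_DEFINE() in one source file and, if the type is shared,
 * exports it from a header with MALLOC_DECLARE(); M_FOODEV is again a
 * hypothetical name:
 *
 *	MALLOC_DECLARE(M_FOODEV);
 *
 * In this era the MALLOC_DEFINE() macro in <sys/malloc.h> is expected to
 * hook the type's init and uninit through SYSINIT/SYSUNINIT, so loadable
 * modules have their statistics registered on load and removed on unload.
 */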