/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit __P((void *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;

u_int vm_kmem_size;

#ifdef INVARIANTS
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects.  However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	struct malloc_type *type;
	long	spare1;
	caddr_t	next;
};
#else /* !INVARIANTS */
struct freelist {
	caddr_t	next;
};
#endif /* INVARIANTS */
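
/*
 * Illustrative note (added sketch, not part of the original sources):
 * with INVARIANTS, a freshly freed object is poisoned so that later
 * tampering is visible.  Assuming 32-bit longs and pointers, a small
 * free object looks like:
 *
 *	offset  0:	0xdeadc0de	(spare0; checked to catch double frees)
 *	offset  4:	type		(the malloc_type that last owned it)
 *	offset  8:	0xdeadc0de	(spare1)
 *	offset 12:	next		(freelist link)
 *
 * Keeping the link in the fourth word means a stale write through the
 * commonly trashed first words corrupts poison, not the freelist itself.
 */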

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 *
 *	If M_ASLEEP is set (M_NOWAIT must also be set), this routine
 *	will have the side effect of calling asleep() if it returns NULL,
 *	allowing the parent to await() at some future time.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef INVARIANTS
	long *end, *lp;
	int copysize;
	const char *savedtype;
#endif
	register struct malloc_type *ksp = type;

#if defined(INVARIANTS) && defined(__i386__)
	if (flags == M_WAITOK)
		KASSERT(intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
#endif
	/*
	 * Must be at splmem() prior to initializing segment to handle
	 * potential initialization race.
	 */

	s = splmem();

	if (type->ks_limit == 0)
		malloc_init(type);

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];

	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_ASLEEP) {
			if (ksp->ks_limblocks < 65535)
				ksp->ks_limblocks++;
			asleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
		}
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, type->ks_shortdesc, 0);
	}
	ksp->ks_size |= 1 << indx;
#ifdef INVARIANTS
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
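		/*
		 * The bucket is empty: grab a fresh chunk from kmem_map.
		 * (Illustrative numbers, assuming 4K pages: a 100-byte
		 * request is served from the 128-byte power-of-2 bucket,
		 * while a request above MAXALLOCSAVE is simply rounded
		 * up to whole pages and goes back to kmem_free() directly
		 * when released.)
		 */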
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
		kbp->kb_total += kbp->kb_elmpercl;
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
			ksp->ks_memuse += allocsize;
			goto out;
		}
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef INVARIANTS
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* INVARIANTS */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef INVARIANTS
	freep = (struct freelist *)va;
	savedtype = (const char *) type->ks_shortdesc;
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = (struct malloc_type *)(WEIRD_ADDR >> 16);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (struct malloc_type *)WEIRD_ADDR;
#endif
	if ((intptr_t)(void *)&freep->next & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
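	/*
	 * Sanity-scan the poisoned prefix of the object: every word
	 * should still read WEIRD_ADDR, so anything else means the
	 * object was written to after it was freed, and the saved
	 * type names the likely culprit.
	 */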
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (long *)va), (void *)va, size,
		    "previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* INVARIANTS */
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
	splx(s);
	return ((void *) va);
}

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef INVARIANTS
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
	register struct malloc_type *ksp = type;

	if (type->ks_limit == 0)
		panic("freeing with unknown type (%s)", type->ks_shortdesc);

	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
	    ("free: address %p out of range", (void *)addr));
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splmem();
#ifdef INVARIANTS
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((uintptr_t)(void *)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    (void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef INVARIANTS
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR)
				panic("free: free item %p modified", fp);
			else if (addr == (caddr_t)fp)
				panic("free: multiple freed item %p", addr);
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* INVARIANTS */
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#ifdef OLD_MALLOC_MEMORY_POLICY
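	/*
	 * Old policy: append to the tail of the bucket queue (FIFO),
	 * which keeps freed memory unused for as long as possible.
	 */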
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse.  This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	register long indx;
	u_long npg;
	u_long mem_size;
	u_long xvm_kmem_size;

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if (MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
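	 *
	 * Illustrative arithmetic (an added example, not a measured
	 * figure): with VM_KMEM_SIZE_SCALE of 4 and 128MB of physical
	 * memory, the scaled size is 128MB / 4 = 32MB, which overrides
	 * the 12MB VM_KMEM_SIZE default and is then clipped against
	 * VM_KMEM_SIZE_MAX below.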
	 */
	xvm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > xvm_kmem_size)
		xvm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (xvm_kmem_size >= VM_KMEM_SIZE_MAX)
		xvm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", xvm_kmem_size, vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
		/ PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/*
	 * The default limit for each malloc type is 1/2 of the
	 * malloc portion of the kmem map size.
	 */
	type->ks_limit = vm_kmem_size / 2;
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
}
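
/*
 * Usage sketch (added illustration; M_MYDEV and the code below are
 * hypothetical, not part of this file).  A subsystem declares its own
 * statistics type with MALLOC_DECLARE() in a header and MALLOC_DEFINE()
 * in one source file, then passes that type to malloc() and free():
 *
 *	MALLOC_DEFINE(M_MYDEV, "mydev", "example device buffers");
 *
 *	struct mydev_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_MYDEV, M_WAITOK);
 *	...
 *	free(sc, M_MYDEV);
 *
 * M_NOWAIT callers must check for a NULL return instead.
 */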