/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31df8bae1dSRodney W. Grimes * SUCH DAMAGE. 32df8bae1dSRodney W. Grimes * 33df8bae1dSRodney W. Grimes * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 34c3aac50fSPeter Wemm * $FreeBSD$ 35df8bae1dSRodney W. Grimes */ 36df8bae1dSRodney W. Grimes 378a58a9f6SJohn Dyson #include "opt_vm.h" 388a58a9f6SJohn Dyson 39df8bae1dSRodney W. Grimes #include <sys/param.h> 4026f9a767SRodney W. Grimes #include <sys/systm.h> 41df8bae1dSRodney W. Grimes #include <sys/kernel.h> 42fb919e4dSMark Murray #include <sys/lock.h> 43df8bae1dSRodney W. Grimes #include <sys/malloc.h> 4454e7152cSDavid Greenman #include <sys/mbuf.h> 45eec258d2SJohn Baldwin #include <sys/mutex.h> 46efeaf95aSDavid Greenman #include <sys/vmmeter.h> 47a448b62aSJake Burkholder #include <sys/proc.h> 489a02e8c6SJason Evans 49df8bae1dSRodney W. Grimes #include <vm/vm.h> 50efeaf95aSDavid Greenman #include <vm/vm_param.h> 51df8bae1dSRodney W. Grimes #include <vm/vm_kern.h> 52efeaf95aSDavid Greenman #include <vm/vm_extern.h> 533075778bSJohn Dyson #include <vm/pmap.h> 543075778bSJohn Dyson #include <vm/vm_map.h> 558355f576SJeff Roberson #include <vm/uma.h> 568355f576SJeff Roberson #include <vm/uma_int.h> 57df8bae1dSRodney W. 
Grimes 58984982d6SPoul-Henning Kamp #if defined(INVARIANTS) && defined(__i386__) 59984982d6SPoul-Henning Kamp #include <machine/cpu.h> 60984982d6SPoul-Henning Kamp #endif 61984982d6SPoul-Henning Kamp 6244a8ff31SArchie Cobbs /* 6344a8ff31SArchie Cobbs * When realloc() is called, if the new size is sufficiently smaller than 6444a8ff31SArchie Cobbs * the old size, realloc() will allocate a new, smaller block to avoid 6544a8ff31SArchie Cobbs * wasting memory. 'Sufficiently smaller' is defined as: newsize <= 6644a8ff31SArchie Cobbs * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 6744a8ff31SArchie Cobbs */ 6844a8ff31SArchie Cobbs #ifndef REALLOC_FRACTION 6944a8ff31SArchie Cobbs #define REALLOC_FRACTION 1 /* new block if <= half the size */ 7044a8ff31SArchie Cobbs #endif 7144a8ff31SArchie Cobbs 723b6fb885SPoul-Henning Kamp MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 739ef246c6SBruce Evans MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 749ef246c6SBruce Evans MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 759ef246c6SBruce Evans 7682cd038dSYoshinobu Inoue MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); 7782cd038dSYoshinobu Inoue MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); 7882cd038dSYoshinobu Inoue 794590fd3aSDavid Greenman static void kmeminit __P((void *)); 802b14f991SJulian Elischer SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL) 812b14f991SJulian Elischer 82a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_FREE, "free", "should be on free list"); 83a1c995b6SPoul-Henning Kamp 84db669378SPeter Wemm static struct malloc_type *kmemstatistics; 85254c6cb3SPoul-Henning Kamp static char *kmembase; 86043a2f3bSBruce Evans static char *kmemlimit; 871f6889a1SMatthew Dillon 888355f576SJeff Roberson #define KMEM_ZSHIFT 4 898355f576SJeff Roberson #define KMEM_ZBASE 16 908355f576SJeff Roberson #define KMEM_ZMASK (KMEM_ZBASE - 1) 918355f576SJeff Roberson 928355f576SJeff Roberson 
#define KMEM_ZMAX 65536 938355f576SJeff Roberson #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) 948355f576SJeff Roberson static uma_zone_t kmemzones[KMEM_ZSIZE + 1]; 958355f576SJeff Roberson 968355f576SJeff Roberson 978355f576SJeff Roberson /* These won't be powers of two for long */ 988355f576SJeff Roberson struct { 998355f576SJeff Roberson int size; 1008355f576SJeff Roberson char *name; 1018355f576SJeff Roberson } kmemsizes[] = { 1028355f576SJeff Roberson {16, "16"}, 1038355f576SJeff Roberson {32, "32"}, 1048355f576SJeff Roberson {64, "64"}, 1058355f576SJeff Roberson {128, "128"}, 1068355f576SJeff Roberson {256, "256"}, 1078355f576SJeff Roberson {512, "512"}, 1088355f576SJeff Roberson {1024, "1024"}, 1098355f576SJeff Roberson {2048, "2048"}, 1108355f576SJeff Roberson {4096, "4096"}, 1118355f576SJeff Roberson {8192, "8192"}, 1128355f576SJeff Roberson {16384, "16384"}, 1138355f576SJeff Roberson {32768, "32768"}, 1148355f576SJeff Roberson {65536, "65536"}, 1158355f576SJeff Roberson {0, NULL}, 1168355f576SJeff Roberson }; 1178355f576SJeff Roberson 118d1c1b841SJason Evans static struct mtx malloc_mtx; 11969ef67f9SJason Evans 1201f6889a1SMatthew Dillon u_int vm_kmem_size; 121df8bae1dSRodney W. Grimes 122df8bae1dSRodney W. Grimes /* 1231c7c3c6aSMatthew Dillon * malloc: 1241c7c3c6aSMatthew Dillon * 1251c7c3c6aSMatthew Dillon * Allocate a block of memory. 1261c7c3c6aSMatthew Dillon * 1271c7c3c6aSMatthew Dillon * If M_NOWAIT is set, this routine will not block and return NULL if 1281c7c3c6aSMatthew Dillon * the allocation fails. 129df8bae1dSRodney W. Grimes */ 130df8bae1dSRodney W. Grimes void * 131df8bae1dSRodney W. Grimes malloc(size, type, flags) 132df8bae1dSRodney W. Grimes unsigned long size; 13360a513e9SPoul-Henning Kamp struct malloc_type *type; 134254c6cb3SPoul-Henning Kamp int flags; 135df8bae1dSRodney W. Grimes { 136df8bae1dSRodney W. 
Grimes int s; 1378355f576SJeff Roberson long indx; 1388355f576SJeff Roberson caddr_t va; 1398355f576SJeff Roberson uma_zone_t zone; 14060a513e9SPoul-Henning Kamp register struct malloc_type *ksp = type; 141df8bae1dSRodney W. Grimes 1420fee3d35SPeter Wemm #if defined(INVARIANTS) 143984982d6SPoul-Henning Kamp if (flags == M_WAITOK) 144b40ce416SJulian Elischer KASSERT(curthread->td_intr_nesting_level == 0, 145984982d6SPoul-Henning Kamp ("malloc(M_WAITOK) in interrupt context")); 146984982d6SPoul-Henning Kamp #endif 1478e8cac55SBruce Evans s = splmem(); 1488355f576SJeff Roberson /* mtx_lock(&malloc_mtx); XXX */ 149df8bae1dSRodney W. Grimes while (ksp->ks_memuse >= ksp->ks_limit) { 150df8bae1dSRodney W. Grimes if (flags & M_NOWAIT) { 151df8bae1dSRodney W. Grimes splx(s); 1528355f576SJeff Roberson /* mtx_unlock(&malloc_mtx); XXX */ 1531707240dSBoris Popov return ((void *) NULL); 154df8bae1dSRodney W. Grimes } 155df8bae1dSRodney W. Grimes if (ksp->ks_limblocks < 65535) 156df8bae1dSRodney W. Grimes ksp->ks_limblocks++; 1578355f576SJeff Roberson msleep((caddr_t)ksp, /* &malloc_mtx */ NULL, PSWP+2, type->ks_shortdesc, 15869ef67f9SJason Evans 0); 159df8bae1dSRodney W. Grimes } 1608355f576SJeff Roberson /* mtx_unlock(&malloc_mtx); XXX */ 16169ef67f9SJason Evans 1628355f576SJeff Roberson if (size <= KMEM_ZMAX) { 1638355f576SJeff Roberson indx = size; 1648355f576SJeff Roberson if (indx & KMEM_ZMASK) 1658355f576SJeff Roberson indx = (indx & ~KMEM_ZMASK) + KMEM_ZBASE; 1668355f576SJeff Roberson zone = kmemzones[indx >> KMEM_ZSHIFT]; 1678355f576SJeff Roberson indx = zone->uz_size; 1688355f576SJeff Roberson va = uma_zalloc(zone, flags); 169df8bae1dSRodney W. Grimes if (va == NULL) { 1708355f576SJeff Roberson /* mtx_lock(&malloc_mtx); XXX */ 171df8bae1dSRodney W. Grimes goto out; 172df8bae1dSRodney W. 
Grimes } 1738355f576SJeff Roberson ksp->ks_size |= indx; 1748355f576SJeff Roberson } else { 1758355f576SJeff Roberson /* XXX This is not the next power of two so this will break ks_size */ 1768355f576SJeff Roberson indx = roundup(size, PAGE_SIZE); 1778355f576SJeff Roberson zone = NULL; 1788355f576SJeff Roberson va = uma_large_malloc(size, flags); 1798355f576SJeff Roberson if (va == NULL) { 1808355f576SJeff Roberson /* mtx_lock(&malloc_mtx); XXX */ 1818355f576SJeff Roberson goto out; 182df8bae1dSRodney W. Grimes } 183df8bae1dSRodney W. Grimes } 1848355f576SJeff Roberson /* mtx_lock(&malloc_mtx); XXX */ 1858355f576SJeff Roberson ksp->ks_memuse += indx; 186df8bae1dSRodney W. Grimes ksp->ks_inuse++; 1878355f576SJeff Roberson out: 188df8bae1dSRodney W. Grimes ksp->ks_calls++; 189df8bae1dSRodney W. Grimes if (ksp->ks_memuse > ksp->ks_maxused) 190df8bae1dSRodney W. Grimes ksp->ks_maxused = ksp->ks_memuse; 191df8bae1dSRodney W. Grimes splx(s); 1928355f576SJeff Roberson /* mtx_unlock(&malloc_mtx); XXX */ 1931921a06dSPoul-Henning Kamp /* XXX: Do idle pre-zeroing. */ 1941921a06dSPoul-Henning Kamp if (va != NULL && (flags & M_ZERO)) 1951921a06dSPoul-Henning Kamp bzero(va, size); 196df8bae1dSRodney W. Grimes return ((void *) va); 197df8bae1dSRodney W. Grimes } 198df8bae1dSRodney W. Grimes 199df8bae1dSRodney W. Grimes /* 2001c7c3c6aSMatthew Dillon * free: 2011c7c3c6aSMatthew Dillon * 202df8bae1dSRodney W. Grimes * Free a block of memory allocated by malloc. 2031c7c3c6aSMatthew Dillon * 2041c7c3c6aSMatthew Dillon * This routine may not block. 205df8bae1dSRodney W. Grimes */ 206df8bae1dSRodney W. Grimes void 207df8bae1dSRodney W. Grimes free(addr, type) 208df8bae1dSRodney W. Grimes void *addr; 20960a513e9SPoul-Henning Kamp struct malloc_type *type; 210df8bae1dSRodney W. Grimes { 2118355f576SJeff Roberson uma_slab_t slab; 2128355f576SJeff Roberson void *mem; 2138355f576SJeff Roberson u_long size; 214df8bae1dSRodney W. 
Grimes int s; 21560a513e9SPoul-Henning Kamp register struct malloc_type *ksp = type; 216254c6cb3SPoul-Henning Kamp 21744a8ff31SArchie Cobbs /* free(NULL, ...) does nothing */ 21844a8ff31SArchie Cobbs if (addr == NULL) 21944a8ff31SArchie Cobbs return; 22044a8ff31SArchie Cobbs 2218355f576SJeff Roberson size = 0; 222b1897c19SJulian Elischer s = splmem(); 22369ef67f9SJason Evans 2248355f576SJeff Roberson mem = (void *)((u_long)addr & (~UMA_SLAB_MASK)); 2258355f576SJeff Roberson slab = hash_sfind(mallochash, mem); 2268355f576SJeff Roberson 2278355f576SJeff Roberson if (slab == NULL) 2288355f576SJeff Roberson panic("free: address %p(%p) has not been allocated.\n", addr, mem); 2298355f576SJeff Roberson 2308355f576SJeff Roberson if (!(slab->us_flags & UMA_SLAB_MALLOC)) { 2318355f576SJeff Roberson size = slab->us_zone->uz_size; 2328355f576SJeff Roberson uma_zfree_arg(slab->us_zone, addr, slab); 23314bf02f8SJohn Dyson } else { 2348355f576SJeff Roberson size = slab->us_size; 2358355f576SJeff Roberson uma_large_free(slab); 23614bf02f8SJohn Dyson } 2378355f576SJeff Roberson /* mtx_lock(&malloc_mtx); XXX */ 2388355f576SJeff Roberson 2398355f576SJeff Roberson ksp->ks_memuse -= size; 2408355f576SJeff Roberson if (ksp->ks_memuse + size >= ksp->ks_limit && 2418355f576SJeff Roberson ksp->ks_memuse < ksp->ks_limit) 2428355f576SJeff Roberson wakeup((caddr_t)ksp); 2438355f576SJeff Roberson ksp->ks_inuse--; 244df8bae1dSRodney W. Grimes splx(s); 2458355f576SJeff Roberson /* mtx_unlock(&malloc_mtx); XXX */ 246df8bae1dSRodney W. Grimes } 247df8bae1dSRodney W. Grimes 248df8bae1dSRodney W. 
Grimes /* 24944a8ff31SArchie Cobbs * realloc: change the size of a memory block 25044a8ff31SArchie Cobbs */ 25144a8ff31SArchie Cobbs void * 25244a8ff31SArchie Cobbs realloc(addr, size, type, flags) 25344a8ff31SArchie Cobbs void *addr; 25444a8ff31SArchie Cobbs unsigned long size; 25544a8ff31SArchie Cobbs struct malloc_type *type; 25644a8ff31SArchie Cobbs int flags; 25744a8ff31SArchie Cobbs { 2588355f576SJeff Roberson uma_slab_t slab; 25944a8ff31SArchie Cobbs unsigned long alloc; 26044a8ff31SArchie Cobbs void *newaddr; 26144a8ff31SArchie Cobbs 26244a8ff31SArchie Cobbs /* realloc(NULL, ...) is equivalent to malloc(...) */ 26344a8ff31SArchie Cobbs if (addr == NULL) 26444a8ff31SArchie Cobbs return (malloc(size, type, flags)); 26544a8ff31SArchie Cobbs 2668355f576SJeff Roberson slab = hash_sfind(mallochash, 2678355f576SJeff Roberson (void *)((u_long)addr & ~(UMA_SLAB_MASK))); 2688355f576SJeff Roberson 26944a8ff31SArchie Cobbs /* Sanity check */ 2708355f576SJeff Roberson KASSERT(slab != NULL, 27144a8ff31SArchie Cobbs ("realloc: address %p out of range", (void *)addr)); 27244a8ff31SArchie Cobbs 27344a8ff31SArchie Cobbs /* Get the size of the original block */ 2748355f576SJeff Roberson if (slab->us_zone) 2758355f576SJeff Roberson alloc = slab->us_zone->uz_size; 2768355f576SJeff Roberson else 2778355f576SJeff Roberson alloc = slab->us_size; 27844a8ff31SArchie Cobbs 27944a8ff31SArchie Cobbs /* Reuse the original block if appropriate */ 28044a8ff31SArchie Cobbs if (size <= alloc 28144a8ff31SArchie Cobbs && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) 28244a8ff31SArchie Cobbs return (addr); 28344a8ff31SArchie Cobbs 28444a8ff31SArchie Cobbs /* Allocate a new, bigger (or smaller) block */ 28544a8ff31SArchie Cobbs if ((newaddr = malloc(size, type, flags)) == NULL) 28644a8ff31SArchie Cobbs return (NULL); 28744a8ff31SArchie Cobbs 28844a8ff31SArchie Cobbs /* Copy over original contents */ 28944a8ff31SArchie Cobbs bcopy(addr, newaddr, min(size, alloc)); 
29044a8ff31SArchie Cobbs free(addr, type); 29144a8ff31SArchie Cobbs return (newaddr); 29244a8ff31SArchie Cobbs } 29344a8ff31SArchie Cobbs 29444a8ff31SArchie Cobbs /* 29544a8ff31SArchie Cobbs * reallocf: same as realloc() but free memory on failure. 29644a8ff31SArchie Cobbs */ 29744a8ff31SArchie Cobbs void * 29844a8ff31SArchie Cobbs reallocf(addr, size, type, flags) 29944a8ff31SArchie Cobbs void *addr; 30044a8ff31SArchie Cobbs unsigned long size; 30144a8ff31SArchie Cobbs struct malloc_type *type; 30244a8ff31SArchie Cobbs int flags; 30344a8ff31SArchie Cobbs { 30444a8ff31SArchie Cobbs void *mem; 30544a8ff31SArchie Cobbs 30644a8ff31SArchie Cobbs if ((mem = realloc(addr, size, type, flags)) == NULL) 30744a8ff31SArchie Cobbs free(addr, type); 30844a8ff31SArchie Cobbs return (mem); 30944a8ff31SArchie Cobbs } 31044a8ff31SArchie Cobbs 31144a8ff31SArchie Cobbs /* 312df8bae1dSRodney W. Grimes * Initialize the kernel memory allocator 313df8bae1dSRodney W. Grimes */ 3142b14f991SJulian Elischer /* ARGSUSED*/ 3152b14f991SJulian Elischer static void 316d841aaa7SBruce Evans kmeminit(dummy) 317d841aaa7SBruce Evans void *dummy; 318df8bae1dSRodney W. Grimes { 319df8bae1dSRodney W. Grimes register long indx; 32027b8623fSDavid Greenman u_long npg; 32127b8623fSDavid Greenman u_long mem_size; 3228355f576SJeff Roberson void *hashmem; 3238355f576SJeff Roberson u_long hashsize; 3248355f576SJeff Roberson int highbit; 3258355f576SJeff Roberson int bits; 3268355f576SJeff Roberson int i; 3278a58a9f6SJohn Dyson 328d1c1b841SJason Evans mtx_init(&malloc_mtx, "malloc", MTX_DEF); 32969ef67f9SJason Evans 3308a58a9f6SJohn Dyson /* 3318a58a9f6SJohn Dyson * Try to auto-tune the kernel memory size, so that it is 3328a58a9f6SJohn Dyson * more applicable for a wider range of machine sizes. 3338a58a9f6SJohn Dyson * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while 3348a58a9f6SJohn Dyson * a VM_KMEM_SIZE of 12MB is a fair compromise. 
The 3358a58a9f6SJohn Dyson * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space 3368a58a9f6SJohn Dyson * available, and on an X86 with a total KVA space of 256MB, 3378a58a9f6SJohn Dyson * try to keep VM_KMEM_SIZE_MAX at 80MB or below. 3388a58a9f6SJohn Dyson * 3398a58a9f6SJohn Dyson * Note that the kmem_map is also used by the zone allocator, 3408a58a9f6SJohn Dyson * so make sure that there is enough space. 3418a58a9f6SJohn Dyson */ 34281930014SPeter Wemm vm_kmem_size = VM_KMEM_SIZE; 3438a58a9f6SJohn Dyson mem_size = cnt.v_page_count * PAGE_SIZE; 3448a58a9f6SJohn Dyson 3458a58a9f6SJohn Dyson #if defined(VM_KMEM_SIZE_SCALE) 34681930014SPeter Wemm if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size) 34781930014SPeter Wemm vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE; 3488a58a9f6SJohn Dyson #endif 3498a58a9f6SJohn Dyson 3508a58a9f6SJohn Dyson #if defined(VM_KMEM_SIZE_MAX) 35181930014SPeter Wemm if (vm_kmem_size >= VM_KMEM_SIZE_MAX) 35281930014SPeter Wemm vm_kmem_size = VM_KMEM_SIZE_MAX; 3538a58a9f6SJohn Dyson #endif 3548a58a9f6SJohn Dyson 3558de6e8e1SMike Smith /* Allow final override from the kernel environment */ 35609786698SPeter Wemm TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size); 3578de6e8e1SMike Smith 35827b8623fSDavid Greenman /* 35927b8623fSDavid Greenman * Limit kmem virtual size to twice the physical memory. 36027b8623fSDavid Greenman * This allows for kmem map sparseness, but limits the size 36127b8623fSDavid Greenman * to something sane. Be careful to not overflow the 32bit 36227b8623fSDavid Greenman * ints while doing the check. 
36327b8623fSDavid Greenman */ 36427b8623fSDavid Greenman if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE)) 36527b8623fSDavid Greenman vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE; 3668a58a9f6SJohn Dyson 36708442f8aSBosko Milekic /* 368ba3e8826SBosko Milekic * In mbuf_init(), we set up submaps for mbufs and clusters, in which 36908442f8aSBosko Milekic * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES), 37008442f8aSBosko Milekic * respectively. Mathematically, this means that what we do here may 37108442f8aSBosko Milekic * amount to slightly more address space than we need for the submaps, 37208442f8aSBosko Milekic * but it never hurts to have an extra page in kmem_map. 37308442f8aSBosko Milekic */ 374d04d50d1SBosko Milekic npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt * 37508442f8aSBosko Milekic sizeof(u_int) + vm_kmem_size) / PAGE_SIZE; 3760d94caffSDavid Greenman 377df8bae1dSRodney W. Grimes kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase, 3782d8acc0fSJohn Dyson (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE)); 3793075778bSJohn Dyson kmem_map->system_map = 1; 3808355f576SJeff Roberson 3818355f576SJeff Roberson hashsize = npg * sizeof(void *); 3828355f576SJeff Roberson 3838355f576SJeff Roberson highbit = 0; 3848355f576SJeff Roberson bits = 0; 3858355f576SJeff Roberson /* The hash size must be a power of two */ 3868355f576SJeff Roberson for (i = 0; i < 8 * sizeof(hashsize); i++) 3878355f576SJeff Roberson if (hashsize & (1 << i)) { 3888355f576SJeff Roberson highbit = i; 3898355f576SJeff Roberson bits++; 3908355f576SJeff Roberson } 3918355f576SJeff Roberson if (bits > 1) 3928355f576SJeff Roberson hashsize = 1 << (highbit); 3938355f576SJeff Roberson 3948355f576SJeff Roberson hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize); 3958355f576SJeff Roberson uma_startup2(hashmem, hashsize / sizeof(void *)); 3968355f576SJeff Roberson 3978355f576SJeff Roberson for (i = 0, indx = 0; kmemsizes[indx].size != 0; 
indx++) { 3988355f576SJeff Roberson uma_zone_t zone; 3998355f576SJeff Roberson int size = kmemsizes[indx].size; 4008355f576SJeff Roberson char *name = kmemsizes[indx].name; 4018355f576SJeff Roberson 4028355f576SJeff Roberson zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL, 4038355f576SJeff Roberson UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 4048355f576SJeff Roberson for (;i <= size; i+= KMEM_ZBASE) 4058355f576SJeff Roberson kmemzones[i >> KMEM_ZSHIFT] = zone; 4068355f576SJeff Roberson 407df8bae1dSRodney W. Grimes } 408254c6cb3SPoul-Henning Kamp } 409254c6cb3SPoul-Henning Kamp 410db669378SPeter Wemm void 411db669378SPeter Wemm malloc_init(data) 412db669378SPeter Wemm void *data; 413254c6cb3SPoul-Henning Kamp { 414db669378SPeter Wemm struct malloc_type *type = (struct malloc_type *)data; 415254c6cb3SPoul-Henning Kamp 416d1bbc7ecSPoul-Henning Kamp if (type->ks_magic != M_MAGIC) 417d1bbc7ecSPoul-Henning Kamp panic("malloc type lacks magic"); 418d1bbc7ecSPoul-Henning Kamp 419ce45b512SBruce Evans if (type->ks_limit != 0) 420db669378SPeter Wemm return; 421db669378SPeter Wemm 422d4060a87SJohn Dyson if (cnt.v_page_count == 0) 423d4060a87SJohn Dyson panic("malloc_init not allowed before vm init"); 424d4060a87SJohn Dyson 42507bbd7f1SDavid Greenman /* 4268a58a9f6SJohn Dyson * The default limits for each malloc region is 1/2 of the 4278a58a9f6SJohn Dyson * malloc portion of the kmem map size. 42807bbd7f1SDavid Greenman */ 4298a58a9f6SJohn Dyson type->ks_limit = vm_kmem_size / 2; 430254c6cb3SPoul-Henning Kamp type->ks_next = kmemstatistics; 431254c6cb3SPoul-Henning Kamp kmemstatistics = type; 432df8bae1dSRodney W. 
Grimes } 433db669378SPeter Wemm 434db669378SPeter Wemm void 435db669378SPeter Wemm malloc_uninit(data) 436db669378SPeter Wemm void *data; 437db669378SPeter Wemm { 438db669378SPeter Wemm struct malloc_type *type = (struct malloc_type *)data; 439db669378SPeter Wemm struct malloc_type *t; 440db669378SPeter Wemm 441db669378SPeter Wemm if (type->ks_magic != M_MAGIC) 442db669378SPeter Wemm panic("malloc type lacks magic"); 443db669378SPeter Wemm 444db669378SPeter Wemm if (cnt.v_page_count == 0) 445db669378SPeter Wemm panic("malloc_uninit not allowed before vm init"); 446db669378SPeter Wemm 447ce45b512SBruce Evans if (type->ks_limit == 0) 448ce45b512SBruce Evans panic("malloc_uninit on uninitialized type"); 449ce45b512SBruce Evans 450db669378SPeter Wemm if (type == kmemstatistics) 451db669378SPeter Wemm kmemstatistics = type->ks_next; 452db669378SPeter Wemm else { 453db669378SPeter Wemm for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 454db669378SPeter Wemm if (t->ks_next == type) { 455db669378SPeter Wemm t->ks_next = type->ks_next; 456db669378SPeter Wemm break; 457db669378SPeter Wemm } 458db669378SPeter Wemm } 459db669378SPeter Wemm } 460ce45b512SBruce Evans type->ks_next = NULL; 461ce45b512SBruce Evans type->ks_limit = 0; 462db669378SPeter Wemm } 463