1 /* 2 * Copyright (c) 1987, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $Id: kern_malloc.c,v 1.29 1997/09/02 20:05:39 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

/* Allocator initialization, run once at boot via SYSINIT below. */
static void kmeminit __P((void *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

/*
 * These objects only need external linkage when the statistics or
 * diagnostic code wants to inspect them; otherwise keep them private
 * to this file.
 */
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
#define MAYBE_STATIC	static
#else
#define MAYBE_STATIC
#endif

/* One bucket per power-of-two allocation size, starting at MINBUCKET. */
MAYBE_STATIC struct kmembuckets bucket[MINBUCKET + 16];
#ifdef KMEMSTATS
/* Per-type (M_*) usage accounting, indexed by malloc type. */
static struct kmemstats kmemstats[M_LAST];
#endif
/* Per-page bookkeeping for the kmem submap (see btokup()). */
MAYBE_STATIC struct kmemusage *kmemusage;
/* Bounds of the kmem submap; used by free() to range-check addresses. */
MAYBE_STATIC char *kmembase;
static char *kmemlimit;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
/* Human-readable names for each malloc type, for messages/sleeps. */
static char *memname[] = INITKMEMNAMES;
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 * Indexed by bucket index; entry i masks the low i address bits.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.  Only the first
 * MAX_COPY bytes of each freed object are poisoned.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;		/* poison word; checked on free for reuse */
	short	type;		/* malloc type of previous owner */
	long	spare1;
	caddr_t	next;		/* free-list link (3rd/4th words, see above) */
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;		/* free-list link, overlaid on the object */
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of kernel memory.
 *
 *	size:	number of bytes requested
 *	type:	malloc type (M_*), used for statistics accounting
 *	flags:	M_NOWAIT to return NULL rather than sleep when memory
 *		is not immediately available
 *
 * Returns a pointer to the block, or NULL (only possible with
 * M_NOWAIT).  Runs the critical section at splhigh().
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	long *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	/*
	 * NOTE(review): this permits type == M_LAST, which would index
	 * one past kmemstats[]/memname[] above -- confirm whether the
	 * check should be >= M_LAST.
	 */
	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif
	/* Round the request up to its power-of-two bucket. */
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splhigh();
#ifdef KMEMSTATS
	/*
	 * Enforce the per-type memory limit: sleep until usage drops,
	 * or fail immediately for M_NOWAIT callers.
	 */
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	/* Poison/check at most MAX_COPY bytes of each object. */
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		/*
		 * Bucket free list is empty: get fresh pages from the
		 * kmem submap and carve them into elements.
		 */
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			/* Oversized request: dedicated whole pages. */
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags);
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			/*
			 * Big allocations are never cached on the free
			 * list; remember the page count so free() can
			 * return them directly to the kmem map.
			 */
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		/*
		 * Link the new elements from the highest address down,
		 * so the list ends up in ascending-address order.
		 */
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	/* Pop the first element off the bucket's free list. */
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* Remember who freed it last, for the corruption report below. */
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	/*
	 * Re-poison the type and next fields before checking, writing
	 * the halves of WEIRD_ADDR so that the bytes at each offset
	 * match the pattern the scan below expects.
	 */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	/* next may be only 2-byte aligned; rotate the pattern to suit. */
	if (((long)(&freep->next)) & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	/* Verify the poison laid down at free time is intact. */
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object %p size %ld %s %s (0x%lx != 0x%x)\n",
			"Data modified on freelist: word", lp - (long *)va,
			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}
	/* Mark the object in-use so free() can spot double frees. */
	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 *
 *	addr:	block previously returned by malloc(); must lie within
 *		the kmem submap and point to the start of the block
 *	type:	the malloc type (M_*) used at allocation time, for
 *		statistics and diagnostics
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef DIAGNOSTIC
	/* Reject pointers that never came from the kmem submap. */
	if ((char *)addr < kmembase || (char *)addr >= kmemlimit) {
		panic("free: address 0x%x out of range", addr);
	}
	/*
	 * NOTE(review): as in malloc(), type == M_LAST slips through
	 * this check -- confirm whether it should be >= M_LAST.
	 */
	if ((u_long)type > M_LAST) {
		panic("free: type %d out of range", type);
	}
#endif
	/* Recover the block size from the per-page usage record. */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splhigh();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		/*
		 * Oversized block: hand the pages straight back to the
		 * kmem map rather than caching them on a free list.
		 */
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/*
		 * If this free just brought the type back under its
		 * limit, wake any malloc() sleepers for this type.
		 */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR) {
				printf("trashed free item %p\n", fp);
				panic("free: free item modified");
			} else if (addr == (caddr_t)fp) {
				printf("multiple freed item %p\n", addr);
				panic("free: multiple free");
			}
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Wake limit sleepers only on the transition below ks_limit. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
#ifdef OLD_MALLOC_MEMORY_POLICY
	/* FIFO policy: append to the tail of the bucket's free list. */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse. This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
	splx(s);
}

/*
 * Initialize the kernel memory allocator: size and create the kmem
 * submap, allocate the kmemusage bookkeeping array, and (under
 * KMEMSTATS) establish per-bucket and per-type limits.  Run once at
 * boot by the SYSINIT at the top of this file.
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	register long indx;
	int npg;

	/* Compile-time sanity checks on the MAXALLOCSAVE tunable. */
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if (MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif
	/* Size the submap to cover mbufs, clusters, and VM_KMEM_SIZE. */
	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + VM_KMEM_SIZE)
		/ PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	/* Carve the kmem submap out of kernel_map; sets kmembase/limit. */
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE),
		FALSE);
	kmem_map->system_map = 1;
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		/* Elements per cluster: at least one page per element. */
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/*
	 * Limit maximum memory for each type to 60% of malloc area size or
	 * 60% of physical memory, whichever is smaller.
	 */
	for (indx = 0; indx < M_LAST; indx++) {
		kmemstats[indx].ks_limit = min(cnt.v_page_count * PAGE_SIZE,
			(npg * PAGE_SIZE - nmbclusters * MCLBYTES
			 - nmbufs * MSIZE)) * 6 / 10;
	}
#endif
}