1 /* $Header: /p/tcsh/cvsroot/tcsh/tc.alloc.c,v 3.56 2016/03/08 12:47:43 christos Exp $ */ 2 /* 3 * tc.alloc.c (Caltech) 2/21/82 4 * Chris Kingsley, kingsley@cit-20. 5 * 6 * This is a very fast storage allocator. It allocates blocks of a small 7 * number of different sizes, and keeps free lists of each size. Blocks that 8 * don't exactly fit are passed up to the next larger size. In this 9 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long. 10 * This is designed for use in a program that uses vast quantities of memory, 11 * but bombs when it runs out. 12 */ 13 /*- 14 * Copyright (c) 1980, 1991 The Regents of the University of California. 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"
#ifdef HAVE_MALLINFO
#include <malloc.h>
#endif
/* sbrk() is deprecated/absent on macOS, so the sbrk-based bookkeeping is
 * disabled there even when configure reports HAVE_SBRK. */
#if defined(HAVE_SBRK) && !defined(__APPLE__)
#define USE_SBRK
#endif

RCSID("$tcsh: tc.alloc.c,v 3.56 2016/03/08 12:47:43 christos Exp $")

#define RCHECK
#define DEBUG

static char *memtop = NULL;	/* PWP: top of current memory */
static char *membot = NULL;	/* PWP: bottom of allocatable memory */

/* When non-zero, free() becomes a no-op (leak on purpose) to survive OS
 * routines that hand us invalid pointers; see comment below. */
int dont_free = 0;

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */

#if !defined(DEBUG) || defined(SYSMALLOC)
/*
 * Report allocation failure on the diagnostic descriptor and terminate
 * immediately.  Uses a raw write() (no stdio/xprintf) because the
 * allocator itself is what just failed.
 */
static void
out_of_memory (void)
{
    static const char msg[] = "Out of memory\n";

    TCSH_IGNORE(write(didfds ? 2 : SHDIAG, msg, strlen(msg)));
    _exit(1);
}
#endif

#ifndef SYSMALLOC

#ifdef SX
extern void* sbrk();
#endif
/*
 * Lots of os routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough and it will pick bad
 * pointers most of the time, in cases where we know we are going to get
 * a bad pointer, we'd rather leak.
 */

#ifndef NULL
#define NULL 0
#endif

typedef unsigned char	U_char;	/* we don't really have signed chars */
typedef unsigned int	U_int;
typedef unsigned short	U_short;
typedef unsigned long	U_long;


/*
 * The overhead on a block is at least 4 bytes.
When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */


/* Round a size up to the allocator's alignment (ROUNDUP is alignment-1). */
#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)

/* Per-block header; overlaps the user data area when the block is free. */
union overhead {
    union overhead *ov_next;	/* when free: next block in this bucket */
    struct {
	U_char	ovu_magic;	/* magic number (MAGIC while allocated) */
	U_char	ovu_index;	/* bucket # */
#ifdef RCHECK
	U_short	ovu_size;	/* actual block size */
	U_int	ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define ov_magic	ovu.ovu_magic
#define ov_index	ovu.ovu_index
#define ov_size		ovu.ovu_size
#define ov_rmagic	ovu.ovu_rmagic
};

#define MAGIC		0xfd	/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#ifdef RCHECK
#define RSLOP		sizeof (U_int)
#else
#define RSLOP		0
#endif


/* Alignment mask: 16-byte alignment on LP64, 8-byte on 32-bit. */
#ifdef _LP64
#define ROUNDUP	15
#else
#define ROUNDUP	7
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define NBUCKETS ((sizeof(long) << 3) - 3)
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;

#ifndef lint
static int findbucket (union overhead *, int);
static void morecore (int);
#endif


/*
 * Consistency check used by free()/realloc().  With DEBUG the failure is
 * fatal (abort); without it we print the diagnostic and return from the
 * caller, leaking the block instead of crashing.
 */
#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p); \
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p); \
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
	return; \
    }
#endif

memalign_t
malloc(size_t nbytes)
{
#ifndef lint
    union overhead *p;
    int bucket = 0;
    unsigned shiftr;

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request.  Account for space used per block
     * for accounting.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif

    /* Total block size = aligned header + payload + trailing range word. */
    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    /* Find the bucket whose block size 2^(bucket+3) covers nbytes. */
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
 */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    /* morecore() may fail silently (sbrk exhausted); re-check the list. */
    if ((p = nextf[bucket]) == NULL) {
	child++;
#ifndef DEBUG
	out_of_memory();
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     * ov_size only holds when the bucket index fits the two-byte field
     * (index <= 13); larger blocks store 0.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    /* User pointer starts just past the aligned header. */
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

#ifndef lint
/*
 * Allocate more memory to the indicated bucket.
 * Grabs a chunk from sbrk(), carves it into 2^(bucket+3)-byte blocks and
 * threads them onto nextf[bucket].  On sbrk failure it simply returns,
 * leaving the bucket empty; the caller detects that.
 */
static void
morecore(int bucket)
{
    union overhead *op;
    int rnu;			/* 2^rnu bytes will be requested */
    int nblks;			/* become nblks blocks of the desired size */
    int siz;

    if (nextf[bucket])
	return;
    /*
     * Insure memory is allocated on a page boundary.  Should make
     * getpagesize() call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    /* Pad the break up to the next 1K boundary first. */
    if ((long) op & 0x3ff) {
	memtop = sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
    memtop = sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room!
*/ 283 if ((long) op == -1) 284 return; 285 memtop += (long) (1 << rnu); 286 /* 287 * Round up to minimum allocation size boundary and deduct from block count 288 * to reflect. 289 */ 290 if (((U_long) op) & ROUNDUP) { 291 op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP); 292 nblks--; 293 } 294 /* 295 * Add new memory allocated to that on free list for this hash bucket. 296 */ 297 nextf[bucket] = op; 298 siz = 1 << (bucket + 3); 299 while (--nblks > 0) { 300 op->ov_next = (union overhead *) (((caddr_t) op) + siz); 301 op = (union overhead *) (((caddr_t) op) + siz); 302 } 303 op->ov_next = NULL; 304 } 305 306 #endif 307 308 void 309 free(ptr_t cp) 310 { 311 #ifndef lint 312 int size; 313 union overhead *op; 314 315 /* 316 * the don't free flag is there so that we avoid os bugs in routines 317 * that free invalid pointers! 318 */ 319 if (cp == NULL || dont_free) 320 return; 321 CHECK(!memtop || !membot, 322 CGETS(19, 2, "free(%p) called before any allocations."), cp); 323 CHECK(cp > (ptr_t) memtop, 324 CGETS(19, 3, "free(%p) above top of memory."), cp); 325 CHECK(cp < (ptr_t) membot, 326 CGETS(19, 4, "free(%p) below bottom of memory."), cp); 327 op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead))); 328 CHECK(op->ov_magic != MAGIC, 329 CGETS(19, 5, "free(%p) bad block."), cp); 330 331 #ifdef RCHECK 332 if (op->ov_index <= 13) 333 CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC, 334 CGETS(19, 6, "free(%p) bad range check."), cp); 335 #endif 336 CHECK(op->ov_index >= NBUCKETS, 337 CGETS(19, 7, "free(%p) bad block index."), cp); 338 size = op->ov_index; 339 op->ov_next = nextf[size]; 340 nextf[size] = op; 341 342 nmalloc[size]--; 343 344 #else 345 if (cp == NULL) 346 return; 347 #endif 348 } 349 350 memalign_t 351 calloc(size_t i, size_t j) 352 { 353 #ifndef lint 354 char *cp; 355 volatile size_t k; 356 357 i *= j; 358 cp = xmalloc(i); 359 /* Stop gcc 5.x from optimizing malloc+memset = calloc */ 360 k = i; 361 
memset(cp, 0, k);

    return ((memalign_t) cp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
#ifndef lint
/* 4 should be plenty, -1 =>'s whole list */
static int realloc_srchlen = 4;
#endif /* lint */

memalign_t
realloc(ptr_t cp, size_t nbytes)
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check
	 * the most common case (last element free'd), then (this failing)
	 * the last ``realloc_srchlen'' items free'd.  If all lookups fail,
	 * then assume the size of the memory block being realloc'd is the
	 * smallest possible.
 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	/* Re-plant the trailing range word and header for the new size. */
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove(res, cp, onb < nbytes ? onb : nbytes);
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

/*
 * On linux, _nss_nis_setnetgrent() calls this function to determine
 * the usable size of the pointer passed, but this is not a portable
 * API, so we cannot use our malloc replacement without providing one.
 * Thanks a lot glibc!
 */
#ifdef __linux__
#define M_U_S_CONST
#else
#define M_U_S_CONST
#endif
size_t malloc_usable_size(M_U_S_CONST void *);
/*
 * glibc compatibility shim: report the usable payload capacity of an
 * allocated block, or 0 if the pointer is not one of ours (no MAGIC).
 * NOTE(review): returns the full 2^(index+3) block size including header
 * overhead — matches the original; confirm callers tolerate that.
 */
size_t
malloc_usable_size(M_U_S_CONST void *ptr)
{
    const union overhead *op = (const union overhead *)
	(((const char *) ptr) - MEMALIGN(sizeof(*op)));
    if (op->ov_magic == MAGIC)
	return 1 << (op->ov_index + 3);
    else
	return 0;
}


#ifndef lint
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
    union overhead *p;
    size_t i;
    int j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);	/* size_t narrowed to int; i < NBUCKETS so safe */
	    j++;
	}
    }
    return (-1);
}

#endif


#else /* SYSMALLOC */

/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free
 **
 ** On many systems:
 **
 **	1. malloc(0) is bad
 **	2. free(0) is bad
 **	3. realloc(0, n) is bad
 **	4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/
/*
 * smalloc: malloc() wrapper that never returns NULL (aborts via
 * out_of_memory) and never passes a zero size to the system allocator.
 * Also maintains the membot/memtop watermarks used by showall().
 */
memalign_t
smalloc(size_t n)
{
    ptr_t ptr;

    n = n ? n : 1;		/* avoid malloc(0) */

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();
#ifndef USE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */
    return ((memalign_t) ptr);
}

/*
 * srealloc: realloc() wrapper with the same guarantees as smalloc();
 * a NULL old pointer degenerates to malloc().
 */
memalign_t
srealloc(ptr_t p, size_t n)
{
    ptr_t ptr;

    n = n ? n : 1;		/* avoid realloc(p, 0) */

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = (p ?
realloc(p, n) : malloc(n))) == NULL) 555 out_of_memory(); 556 #ifndef USE_SBRK 557 if (memtop < ((char *) ptr) + n) 558 memtop = ((char *) ptr) + n; 559 if (membot == NULL) 560 membot = ptr; 561 #endif /* !USE_SBRK */ 562 return ((memalign_t) ptr); 563 } 564 565 memalign_t 566 scalloc(size_t s, size_t n) 567 { 568 ptr_t ptr; 569 570 n *= s; 571 n = n ? n : 1; 572 573 #ifdef USE_SBRK 574 if (membot == NULL) 575 membot = sbrk(0); 576 #endif /* USE_SBRK */ 577 578 if ((ptr = malloc(n)) == NULL) 579 out_of_memory(); 580 581 memset (ptr, 0, n); 582 583 #ifndef USE_SBRK 584 if (memtop < ((char *) ptr) + n) 585 memtop = ((char *) ptr) + n; 586 if (membot == NULL) 587 membot = ptr; 588 #endif /* !USE_SBRK */ 589 590 return ((memalign_t) ptr); 591 } 592 593 void 594 sfree(ptr_t p) 595 { 596 if (p && !dont_free) 597 free(p); 598 } 599 600 #endif /* SYSMALLOC */ 601 602 /* 603 * mstats - print out statistics about malloc 604 * 605 * Prints two lines of numbers, one showing the length of the free list 606 * for each size category, the second showing the number of mallocs - 607 * frees for each size category. 608 */ 609 /*ARGSUSED*/ 610 void 611 showall(Char **v, struct command *c) 612 { 613 #ifndef SYSMALLOC 614 size_t i, j; 615 union overhead *p; 616 int totfree = 0, totused = 0; 617 618 xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname); 619 for (i = 0; i < NBUCKETS; i++) { 620 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) 621 continue; 622 xprintf(" %4zd", j); 623 totfree += j * (1 << (i + 3)); 624 } 625 xprintf("\n%s:\t", CGETS(19, 9, "used")); 626 for (i = 0; i < NBUCKETS; i++) { 627 xprintf(" %4d", nmalloc[i]); 628 totused += nmalloc[i] * (1 << (i + 3)); 629 } 630 xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"), 631 totused, totfree); 632 xprintf(CGETS(19, 11, 633 "\tAllocated memory from 0x%lx to 0x%lx. 
Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else /* SYSMALLOC */
#ifndef HAVE_MALLINFO
#ifdef USE_SBRK
    memtop = sbrk(0);
#endif /* USE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#else /* HAVE_MALLINFO */
    /* System malloc: report whatever mallinfo() exposes. */
    struct mallinfo mi;

    mi = mallinfo();
    xprintf(CGETS(19, 13, "%s current memory allocation:\n"), progname);
    xprintf(CGETS(19, 14, "Total space allocated from system: %d\n"), mi.arena);
    xprintf(CGETS(19, 15, "Number of non-inuse chunks: %d\n"), mi.ordblks);
    xprintf(CGETS(19, 16, "Number of mmapped regions: %d\n"), mi.hblks);
    xprintf(CGETS(19, 17, "Total space in mmapped regions: %d\n"), mi.hblkhd);
    xprintf(CGETS(19, 18, "Total allocated space: %d\n"), mi.uordblks);
    xprintf(CGETS(19, 19, "Total non-inuse space: %d\n"), mi.fordblks);
    xprintf(CGETS(19, 20, "Top-most, releasable space: %d\n"), mi.keepcost);
#endif /* HAVE_MALLINFO */
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}