/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>

#ifndef debug
#define	NDEBUG
#endif

#include <stdlib.h>
#include <string.h>
#include "assert.h"
#include "malloc.h"
#include "mallint.h"
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <limits.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);

/*
 * user-level memory allocator (malloc, free, realloc)
 *
 *	-malloc, free, realloc and mallopt form a memory allocator
 *	similar to the original malloc, free, and realloc.  The routines
 *	here are much faster than the original, with slightly worse
 *	space usage (a few percent difference on most input).  They
 *	do not have the property that data in freed blocks is left
 *	untouched until the space is reallocated.
 *
 *	-Memory is kept in the "arena", a singly linked list of blocks.
 *	These blocks are of 3 types.
 *		1. A free block.  This is a block not in use by the
 *		   user.  It has a 3 word header.  (See description
 *		   of the free queue.)
 *		2. An allocated block.  This is a block the user has
 *		   requested.  It has only a 1 word header, pointing
 *		   to the next block of any sort.
 *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
 *		   has a 1 word header, as does 2.
 *	Blocks of type 1 have the lower bit of the pointer to the
 *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
 *	to mark them busy.
 *
 *	-Unallocated blocks are kept on an unsorted doubly linked
 *	free list.
 *
 *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
 *	head of the free queue, which prevents bunching of small
 *	blocks at the head of the queue.
 *
 *	-Compaction is performed at free time of any blocks immediately
 *	following the freed block.  The freed block will be combined
 *	with a preceding block during the search phase of malloc.
 *	Since a freed block is added at the front of the free queue,
 *	which is moved to the end of the queue if considered and
 *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding free block is freed and
 *	reallocated on the next call to malloc.  The time savings of
 *	this strategy is judged to be worth the occasional waste of
 *	memory.
 *
 *	-Small blocks (of size < MAXSIZE) are not allocated directly.
 *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and numlblks small blocks.
 *	Holding blocks for a given size of small block (rounded to the
 *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *	holding block with an unused small block is in front of any without.
 *	A list of free blocks is kept within the holding block.
 */
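
/*
 * Illustration of the busy-bit convention described above.  Because
 * block headers are word aligned, the low bit of a nextblk pointer is
 * available to serve as the busy flag.  This is only a sketch of the
 * idea; the allocator's real SETBUSY/TESTBUSY/CLRBUSY macros are
 * defined in mallint.h, and the MALLOC_EXAMPLES guard is ours, not
 * part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static void *
example_setbusy(void *p)
{
	return ((void *)((uintptr_t)p | 1));	/* set low (busy) bit */
}

static int
example_testbusy(void *p)
{
	return (((uintptr_t)p & 1) != 0);	/* low bit set => busy */
}

static void *
example_clrbusy(void *p)
{
	return ((void *)((uintptr_t)p & ~(uintptr_t)1)); /* clear busy bit */
}
#endif	/* MALLOC_EXAMPLES */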

/*
 * description of arena, free queue, holding blocks etc.
 *
 * Newer compilers and linkers do not guarantee the order of initialized
 * data.  Define freeptr as arena[2-3] to guarantee that it follows arena
 * in memory.  Later code depends on this order.
 */

static struct header arena[4] = {
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0}
};
/*
 * the second word is a minimal block to
 * start the arena.  The first is a busy
 * block to be pointed to by the last block.
 */

#define	freeptr	(arena + 2)
/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;	/* pointer to array of head pointers */
					/* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocks are used
 */
static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;
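
/*
 * Sketch of the arena bootstrap performed on the first call to
 * malloc_unlocked() below: arena[0] and arena[1] become busy stub
 * blocks and the two freeptr headers form an empty free queue.  This
 * check is illustrative only; the MALLOC_EXAMPLES guard is ours, not
 * part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static int
example_arena_bootstrap_ok(void)
{
	return (freeptr[0].nextfree == &(freeptr[1]) &&
	    freeptr[1].prevfree == &(freeptr[0]) &&
	    freeptr[1].nextblk == &(arena[0]) &&
	    TESTBUSY(arena[0].nextblk) &&
	    TESTBUSY(arena[1].nextblk));
}
#endif	/* MALLOC_EXAMPLES */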

#ifdef debug
static int case1count = 0;

static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif


/*
 * malloc(nbytes) - give a user nbytes to use
 */

void *
malloc(size_t nbytes)
{
	void *ret;

	(void) mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes, 0);
	(void) mutex_unlock(&mlock);
	return (ret);
}

/*
 * Use malloc_unlocked() to get an address to start with; given this
 * address, find the closest address that satisfies the requested
 * alignment and return it after doing some housekeeping (refer to the
 * ASCII art below).
 */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;
	struct header *hd;
	size_t alloc_size;
	uintptr_t fr;
	static int realloc;

	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	alloc_size = size + alignment;
	if (alloc_size < size) {	/* overflow */
		return (NULL);
	}

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	fr = (fr + alignment - 1) / alignment * alignment;

	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * we hit an edge case, where the space ahead of the
		 * aligned address is not sufficient to hold 'header'
		 * and hence we can't free it.  So double the
		 * allocation request.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment * 2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			fr = fr + alignment;
		}
	}

	/*
	 *	    +-------+			+-------+
	 *	+---|  <a>  |			|  <a>  |--+
	 *	|   +-------+<--alloc_buf-->	+-------+  |
	 *	|   |	    |			|	|  |
	 *	|   |	    |			|	|  |
	 *	|   |	    |			|	|  |
	 *	|   |	    |		hd-->	+-------+  |
	 *	|   |	    |		    +---|  <b>  |<-+
	 *	|   |	    |		    |	+-------+<--- fr
	 *	|   |	    |		    |	|	|
	 *	|   |	    |		    |	|	|
	 *	|   |	    |		    |	|	|
	 *	|   |	    |		    |	|	|
	 *	|   |	    |		    |	|	|
	 *	|   |	    |		    |	|	|
	 *	|   +-------+		    |	+-------+
	 *	+-->|  next |		    +-->|  next |
	 *	    +-------+			+-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk =
	    ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk =
	    SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}

void *
valloc(size_t size)
{
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	if (!pagesize)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}
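
/*
 * Usage sketch for memalign(): request 100 bytes aligned to a 64-byte
 * boundary and verify the alignment.  Illustrative only; the
 * MALLOC_EXAMPLES guard is ours, not part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static int
example_memalign_usage(void)
{
	void *p = memalign(64, 100);

	if (p == NULL)
		return (0);			/* allocation failed */
	assert(((uintptr_t)p & 63) == 0);	/* 64-byte aligned */
	free(p);
	return (1);
}
#endif	/* MALLOC_EXAMPLES */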

/*
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
 */

static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk;	/* head of right sized queue */
		struct lblk *lblk;		/* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  Allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* changes to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
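		/*
		 * Worked example of the sizing above, assuming for
		 * illustration that ALIGNSZ (and thus the default
		 * grain) is 8: a request for nbytes == 10 rounds up
		 * to nb == 16 and selects holdhead[16 / 8], i.e.
		 * holdhead[2].  The extra, unused holdhead[0] entry
		 * is what lets nb / grain serve directly as the
		 * index.  Only after indexing is MINHEAD added, so
		 * each little block carved from the holding block
		 * also has room for its one-word header.
		 */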
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused + nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused + nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if (newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space + nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb - MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)
		    CLRALL(lblk->header.holder))->blksz >= nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].nextblk = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk - &(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of bytes to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
					(void) write(2,
					    "Case 1 hit more than once."
					    " brk or sbrk?\n", 40);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ??? newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ??? newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend + nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}

/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 * input - ptr-1 contains the block header.
 *	If the header points forward, we have a normal
 *	block pointing to the next block
 *	if the header points backward, we have a small
 *	block from a holding block.
 *	In both cases, the busy bit must be set
 */

void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}
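
/*
 * Sketch of the double-free tolerance noted in free_unlocked() below:
 * freeing a block twice is deliberately absorbed (the busy bit is
 * already clear on the second call) rather than corrupting the arena.
 * Illustrative only; relying on this is still a bug in the caller.
 * The MALLOC_EXAMPLES guard is ours, not part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static void
example_double_free(void)
{
	char *p = malloc(32);

	if (p == NULL)
		return;
	free(p);
	free(p);	/* absorbed: block is no longer marked busy */
}
#endif	/* MALLOC_EXAMPLES */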

/*
 * free_unlocked(ptr) - Do the real work for free()
 */

static void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk *lblk;	/* pointer to freed block */
		ssize_t offset;		/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend)
				lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}


/*
 * realloc(ptr, size) - give the user a block of size "size", with
 *			the contents pointed to by ptr.  Free ptr.
 */

void *
realloc(void *ptr, size_t size)
{
	void *retval;

	(void) mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	(void) mutex_unlock(&mlock);
	return (retval);
}
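
/*
 * Usage sketch for realloc(): grow a buffer while keeping the old
 * pointer until the call succeeds (assumes newsize > 0; realloc with
 * size 0 frees the block).  Illustrative only; the MALLOC_EXAMPLES
 * guard is ours, not part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static char *
example_realloc_grow(char *buf, size_t newsize)
{
	char *nbuf = realloc(buf, newsize);

	if (nbuf == NULL)
		return (buf);	/* old buffer is still valid */
	return (nbuf);
}
#endif	/* MALLOC_EXAMPLES */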

/*
 * realloc_unlocked(ptr, size) - Do the real work for realloc()
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;	/* block size as allocator sees it */
	char *newptr;	/* pointer to user's new block */
	size_t cpysize;	/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--it's to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;

			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minhead via mallopt, that's
		 * their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend)
				lastblk = blk;
		}
		/* get size we really need */
		trusize = size + minhead;
		trusize = (trusize + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}


/* LINTLIBRARY */
/*
 * calloc - allocate and clear memory block
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;

	/* guard against multiplication overflow */
	if (size != 0 && num > (size_t)-1 / size)
		return (NULL);
	num *= size;
	mp = malloc(num);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, num);
	return (mp);
}
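
/*
 * Usage sketch for calloc(): the product num * size is checked for
 * overflow above before the allocation is attempted, so a huge
 * element count fails cleanly with NULL instead of returning an
 * undersized block.  Illustrative only; the MALLOC_EXAMPLES guard is
 * ours, not part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static int *
example_calloc_array(size_t nelem)
{
	return ((int *)calloc(nelem, sizeof (int)));	/* zero-filled */
}
#endif	/* MALLOC_EXAMPLES */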

/*
 * Mallopt - set options for allocation
 *
 *	Mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST	Set maxfast to value.  Maxfast is the size of the
 *			largest small, quickly allocated block.  Maxfast
 *			may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS	Set numlblks to value.  Numlblks is the number of
 *			small blocks per holding block.  Value must be
 *			greater than 1.
 *
 *	M_GRAIN		Set grain to value.  The sizes of all blocks
 *			smaller than maxfast are considered to be rounded
 *			up to the nearest multiple of grain.  The default
 *			value of grain is the smallest number of bytes
 *			which will allow alignment of any data type.  Grain
 *			will be rounded up to a multiple of its default,
 *			and maxfast will be rounded up to a multiple of
 *			grain.  Value must be greater than 0.
 *
 *	M_KEEP		Retain data in freed block until the next malloc,
 *			realloc, or calloc.  Value is ignored.
 *			This option is provided only for compatibility with
 *			the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		  1, if malloc has previously been called or
 *		     if value or cmd have illegal values
 */

int
mallopt(int cmd, int value)
{
	/* disallow changes once a small block is allocated */
	(void) mutex_lock(&mlock);
	if (change) {
		(void) mutex_unlock(&mlock);
		return (1);
	}
	switch (cmd) {
	case M_MXFAST:
		if (value < 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		fastct = (value + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_NLBLKS:
		if (value <= 1) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		numlblks = value;
		break;
	case M_GRAIN:
		if (value <= 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}

		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_KEEP:
		if (change && holdhead != NULL) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		minhead = HEADSZ;
		break;
	default:
		(void) mutex_unlock(&mlock);
		return (1);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}
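
/*
 * Usage sketch for mallopt(): tuning must happen before the first
 * small-block allocation, or mallopt() returns 1.  The values here
 * are arbitrary, for illustration only; the MALLOC_EXAMPLES guard is
 * ours, not part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static int
example_mallopt_tuning(void)
{
	if (mallopt(M_MXFAST, 64) != 0)	/* fast path for blocks <= ~64 */
		return (1);
	if (mallopt(M_NLBLKS, 50) != 0)	/* 50 small blocks per holder */
		return (1);
	return (0);
}
#endif	/* MALLOC_EXAMPLES */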

/*
 * mallinfo - provide information about space usage
 *
 * input - none
 *
 * output - a structure containing a description of
 *	    space usage, defined in malloc.h
 */

struct mallinfo
mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int i;				/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ - MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding blocks have been allocated
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks * size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks * (HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
	return (inf);
}


/*
 * freespace - calc. how much space is used in the free
 *	small blocks in a given holding block
 *
 * input - holdblk = given holding block
 *
 * returns space used in free small blocks of holdblk
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;
	struct lblk *unused;

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}

static void *
morecore(size_t bytes)
{
	void *ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;

		/*
		 * The request size is too big.  We need to do this in
		 * chunks, since sbrk() takes a signed (intptr_t) argument.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}
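
/*
 * Usage sketch for mallinfo(): total free space is the free ordinary
 * block space plus the free small-block space.  Field types are as
 * declared in malloc.h; illustrative only, and the MALLOC_EXAMPLES
 * guard is ours, not part of the original build.
 */
#ifdef MALLOC_EXAMPLES
static unsigned long
example_total_free_space(void)
{
	struct mallinfo mi = mallinfo();

	return ((unsigned long)mi.fordblks + (unsigned long)mi.fsmblks);
}
#endif	/* MALLOC_EXAMPLES */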

#ifdef debug
int
check_arena(void)
{
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((uintptr_t)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

#define	RSTALLOC	1
#endif

#ifdef RSTALLOC
/*
 * rstalloc - reset alloc routines
 *
 * description - return allocated memory and reset
 *		 allocation pointers.
 *
 * Warning - This is for debugging purposes only.
 *	     It will return all memory allocated after
 *	     the first call to malloc, even if some
 *	     of it was fetched by a user's sbrk().
 */

void
rstalloc(void)
{
	(void) mutex_lock(&mlock);
	minhead = MINHEAD;
	grain = ALIGNSZ;
	numlblks = NUMLBLKS;
	fastct = FASTCT;
	maxfast = MAXFAST;
	change = 0;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return;
	}
	(void) brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
#ifdef debug
	case1count = 0;
#endif
	(void) mutex_unlock(&mlock);
}
#endif	/* RSTALLOC */

/*
 * cfree is an undocumented, obsolete function
 */

/* ARGSUSED1 */
void
cfree(void *p, size_t num, size_t size)
{
	free(p);
}