1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1983 Regents of the University of California. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #if defined(LIBC_SCCS) && !defined(lint) 33 /*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/ 34 static char *rcsid = "$FreeBSD$"; 35 #endif /* LIBC_SCCS and not lint */ 36 37 /* 38 * malloc.c (Caltech) 2/21/82 39 * Chris Kingsley, kingsley@cit-20. 40 * 41 * This is a very fast storage allocator. 
It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include "rtld.h"
#include "rtld_printf.h"
#include "paths.h"

/*
 * Pre-allocate mmap'ed pages.  NPOOLPAGES is evaluated only after pagesz
 * has been initialized by the first __crt_malloc() call (128KB worth of
 * pages).  [pagepool_start, pagepool_end) is the current unconsumed pool.
 */
#define NPOOLPAGES	(128*1024/pagesz)
static caddr_t		pagepool_start, pagepool_end;

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union overhead {
	union overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
	} ovu;
#define ov_magic	ovu.ovu_magic
#define ov_index	ovu.ovu_index
/*
 * NOTE(review): ov_rmagic/ov_size refer to ovu_rmagic/ovu_size members
 * that are not declared above — they exist only in the range-checking
 * (RCHECK) variant of this allocator and these two macros are unused here.
 */
#define ov_rmagic	ovu.ovu_rmagic
#define ov_size		ovu.ovu_size
};

static void morecore(int bucket);
static int morepages(int n);
static int findbucket(union overhead *freep, int srchlen);


#define MAGIC		0xef		/* magic # on accounting info */
#define RMAGIC		0x5555		/* magic # on range info */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).
The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	int pagesz;			/* page size */
static	int pagebucket;			/* page size bucket */

/*
 * The array of supported page sizes is provided by the user, i.e., the
 * program that calls this storage allocator.  That program must initialize
 * the array before making its first call to allocate storage.  The array
 * must contain at least one page size.  The page sizes must be stored in
 * increasing order.
 */

/*
 * Allocate nbytes of storage from the bucketed free lists, growing the
 * page pool via morepages() when a list is empty.  Returns a pointer to
 * the usable area (just past the union overhead header), or NULL on
 * failure.  The first call also initializes pagesz/pagebucket and aligns
 * the pool start.  (pagesizes[] is supplied by the embedding program —
 * presumably via rtld.h — and must already be initialized; see comment
 * above.)
 */
void *
__crt_malloc(size_t nbytes)
{
	union overhead *op;
	int bucket;
	ssize_t n;
	size_t amt;

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = pagesizes[0];
		if (morepages(NPOOLPAGES) == 0)
			return NULL;
		op = (union overhead *)(pagepool_start);
		/* Advance pool start so op + header lands page aligned. */
		n = n - sizeof (*op) - ((long)op & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			pagepool_start += n;
		}
		/* Find the bucket whose block size equals one page. */
		bucket = 0;
		amt = 8;
		while ((unsigned)pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 * Below, amt + n is the usable payload of the candidate bucket:
	 * sub-page buckets lose sizeof(*op) to the header; page-and-up
	 * buckets carry one extra page so the payload is amt - sizeof(*op)
	 * + pagesz (n stays pagesz - sizeof(*op) from the test above).
	 */
	if (nbytes <= (unsigned long)(n = pagesz - sizeof(*op))) {
		amt = 8;	/* size of first bucket */
		bucket = 0;
		n = -sizeof(*op);
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)	/* request too large: amt wrapped to zero */
			return (NULL);
		bucket++;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;	/* overwrites low bits of ov_next */
	op->ov_index = bucket;
	return ((char *)(op + 1));
}

/*
 * Allocate zeroed storage for num elements of the given size.
 * Returns NULL on multiplication overflow or allocation failure.
 */
void *
__crt_calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		return (NULL);
	}

	if ((ret = __crt_malloc(num * size)) != NULL)
		memset(ret, 0, num * size);

	return (ret);
}

/*
 * Allocate more memory to the indicated bucket.
 * Carves one pool region into nblks blocks of size 2^(bucket+3) and
 * threads them onto nextf[bucket].  Silently returns (leaving the list
 * empty) if the bucket is out of range or the pool cannot be grown.
 */
static void
morecore(int bucket)
{
	union overhead *op;
	int sz;		/* size of desired block */
	int amt;			/* amount to allocate */
	int nblks;			/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 * (Guards the 1 << (bucket + 3) shift below against int overflow.)
	 */
	if ((unsigned)bucket >= NBBY * sizeof(int) - 4)
		return;
	sz = 1 << (bucket + 3);
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		/* One block, padded by a page so the payload fits. */
		amt = sz + pagesz;
		nblks = 1;
	}
	if (amt > pagepool_end - pagepool_start)
		if (morepages(amt/pagesz + NPOOLPAGES) == 0)
			return;
	op = (union overhead *)pagepool_start;
	pagepool_start += amt;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
}

/*
 * Return a block to its bucket's free list.  NULL is ignored; a block
 * whose header magic does not match is silently dropped (sanity check
 * against freeing foreign or corrupted pointers).  Memory is never
 * returned to the system.
 */
void
__crt_free(void *cp)
{
	int size;
	union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
	size = op->ov_index;
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass through the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
static int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */

/*
 * Resize the block at cp to nbytes.  cp == NULL degenerates to malloc.
 * If the block's current bucket already fits nbytes (and the next
 * smaller bucket does not), cp is returned unchanged; otherwise a new
 * block is allocated and the data copied.
 *
 * NOTE(review): legacy quirks preserved here — the old block is freed
 * *before* the new one is allocated, so bcopy() below reads from a
 * free-listed block (only its first-word header has been clobbered);
 * and for i == NBUCKETS (bucket not found) the 1 << (i + 3) shift
 * exceeds a 32-bit int.  Both are long-standing behavior of this
 * allocator, not changed by this review.
 */
void *
__crt_realloc(void *cp, size_t nbytes)
{
	u_int onb;
	int i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (__crt_malloc(nbytes));
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	/* onb = usable payload of bucket i (see __crt_malloc sizing). */
	onb = 1 << (i + 3);
	if (onb < (u_int)pagesz)
		onb -= sizeof(*op);
	else
		onb += pagesz - sizeof(*op);
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			/* i becomes the payload of the next smaller bucket. */
			i = 1 << (i + 2);
			if (i < pagesz)
				i -= sizeof(*op);
			else
				i += pagesz - sizeof(*op);
		}
		if (nbytes <= onb && nbytes > (size_t)i)
			return (cp);
		__crt_free(cp);
	}
	if ((res = __crt_malloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)		/* common optimization if "compacting" */
		bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
	return (res);
}

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
331 */ 332 static int 333 findbucket(union overhead *freep, int srchlen) 334 { 335 union overhead *p; 336 int i, j; 337 338 for (i = 0; i < NBUCKETS; i++) { 339 j = 0; 340 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { 341 if (p == freep) 342 return (i); 343 j++; 344 } 345 } 346 return (-1); 347 } 348 349 static int 350 morepages(int n) 351 { 352 int fd = -1; 353 int offset; 354 355 if (pagepool_end - pagepool_start > pagesz) { 356 caddr_t addr = (caddr_t) 357 (((long)pagepool_start + pagesz - 1) & ~(pagesz - 1)); 358 if (munmap(addr, pagepool_end - addr) != 0) { 359 #ifdef IN_RTLD 360 rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": " 361 "morepages: cannot munmap %p: %s\n", 362 addr, rtld_strerror(errno)); 363 #endif 364 } 365 } 366 367 offset = (long)pagepool_start - ((long)pagepool_start & ~(pagesz - 1)); 368 369 if ((pagepool_start = mmap(0, n * pagesz, 370 PROT_READ|PROT_WRITE, 371 MAP_ANON|MAP_PRIVATE, fd, 0)) == (caddr_t)-1) { 372 #ifdef IN_RTLD 373 rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": morepages: " 374 "cannot mmap anonymous memory: %s\n", 375 rtld_strerror(errno)); 376 #endif 377 return 0; 378 } 379 pagepool_end = pagepool_start + n * pagesz; 380 pagepool_start += offset; 381 382 return n; 383 } 384