/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
static char *rcsid = "$FreeBSD$";
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include "rtld.h"
#include "rtld_printf.h"
#include "paths.h"

/*
 * Pre-allocate mmap'ed pages
 */
#define	NPOOLPAGES	(128*1024/pagesz)
static caddr_t		pagepool_start, pagepool_end;

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union overhead {
	union overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
};
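
/*
 * Layout note: the pointer returned to the caller is (op + 1), so the
 * header of a live allocation p can be recovered with
 *
 *	union overhead *op = (union overhead *)p - 1;
 *
 * where op->ov_magic must equal MAGIC and op->ov_index names the bucket
 * (and therefore the size) the block was carved from.
 */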

static void morecore(int bucket);
static int morepages(int n);
static int findbucket(union overhead *freep, int srchlen);

#define	MAGIC		0xef		/* magic # on accounting info */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	int pagesz;			/* page size */
static	int pagebucket;			/* page size bucket */

/*
 * The array of supported page sizes is provided by the user, i.e., the
 * program that calls this storage allocator.  That program must initialize
 * the array before making its first call to allocate storage.  The array
 * must contain at least one page size.  The page sizes must be stored in
 * increasing order.
 */

void *
__crt_malloc(size_t nbytes)
{
	union overhead *op;
	int bucket;
	ssize_t n;
	size_t amt;

	/*
	 * The first time malloc is called, set up the page size and
	 * align the break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = pagesizes[0];
		if (morepages(NPOOLPAGES) == 0)
			return NULL;
		op = (union overhead *)(pagepool_start);
		n = n - sizeof (*op) - ((long)op & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			pagepool_start += n;
		}
		bucket = 0;
		amt = 8;
		while ((unsigned)pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (unsigned long)(n = pagesz - sizeof(*op))) {
		amt = 8;	/* size of first bucket */
		bucket = 0;
		n = -sizeof(*op);
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)
			return (NULL);
		bucket++;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
	return ((char *)(op + 1));
}

void *
__crt_calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		return (NULL);
	}

	if ((ret = __crt_malloc(num * size)) != NULL)
		memset(ret, 0, num * size);

	return (ret);
}
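
/*
 * Worked example of the bucket selection in __crt_malloc() above, assuming
 * 4096-byte pages and 64-bit pointers (so sizeof(union overhead) == 8):
 * a 100-byte request starts at amt = 8, bucket = 0, and doubles amt until
 * nbytes <= amt - sizeof(union overhead), i.e. amt = 128, bucket = 4.
 * The request is served from nextf[4] as a 128-byte block with 120 bytes
 * usable by the caller.
 */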

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	union overhead *op;
	int sz;				/* size of desired block */
	int amt;			/* amount to allocate */
	int nblks;			/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	if ((unsigned)bucket >= NBBY * sizeof(int) - 4)
		return;
	sz = 1 << (bucket + 3);
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
	if (amt > pagepool_end - pagepool_start)
		if (morepages(amt/pagesz + NPOOLPAGES) == 0)
			return;
	op = (union overhead *)pagepool_start;
	pagepool_start += amt;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
}

void
__crt_free(void *cp)
{
	int size;
	union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
	size = op->ov_index;
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass through the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is static in this file).  If that fails we just copy however many
 * bytes were given to realloc() and hope it's not huge.
 */
static int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */

void *
__crt_realloc(void *cp, size_t nbytes)
{
	u_int onb;
	int i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (__crt_malloc(nbytes));
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	onb = 1 << (i + 3);
	if (onb < (u_int)pagesz)
		onb -= sizeof(*op);
	else
		onb += pagesz - sizeof(*op);
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			i = 1 << (i + 2);
			if (i < pagesz)
				i -= sizeof(*op);
			else
				i += pagesz - sizeof(*op);
		}
		if (nbytes <= onb && nbytes > (size_t)i)
			return (cp);
		__crt_free(cp);
	}
	if ((res = __crt_malloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)		/* common optimization if "compacting" */
		bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
	return (res);
}
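
/*
 * Example of the size check above, assuming 4096-byte pages and an 8-byte
 * union overhead: a block from bucket 4 is 128 bytes, so onb is 120 usable
 * bytes, while the next smaller bucket would have held at most 64 - 8 = 56
 * bytes.  A realloc() to any size in (56, 120] therefore returns the
 * original pointer without copying or touching the free lists.
 */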

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
	union overhead *p;
	int i, j;

	for (i = 0; i < NBUCKETS; i++) {
		j = 0;
		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
			if (p == freep)
				return (i);
			j++;
		}
	}
	return (-1);
}

static int
morepages(int n)
{
	caddr_t	addr;
	int	offset;

	if (pagepool_end - pagepool_start > pagesz) {
		addr = (caddr_t)roundup2((long)pagepool_start, pagesz);
		if (munmap(addr, pagepool_end - addr) != 0) {
#ifdef IN_RTLD
			rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": "
			    "morepages: cannot munmap %p: %s\n",
			    addr, rtld_strerror(errno));
#endif
		}
	}

	offset = (long)pagepool_start - rounddown2((long)pagepool_start,
	    pagesz);

	pagepool_start = mmap(0, n * pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (pagepool_start == MAP_FAILED) {
#ifdef IN_RTLD
		rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": morepages: "
		    "cannot mmap anonymous memory: %s\n",
		    rtld_strerror(errno));
#endif
		return (0);
	}
	pagepool_end = pagepool_start + n * pagesz;
	pagepool_start += offset;

	return (n);
}
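
/*
 * Minimal usage sketch of the interface defined in this file, assuming
 * 4096-byte pages and an 8-byte union overhead; the block below is not
 * compiled, it only illustrates how the __crt_* routines fit together.
 */
#if 0
static void
example(void)
{
	char *p, *q;

	p = __crt_malloc(100);		/* served from the 128-byte bucket */
	if (p == NULL)
		return;
	q = __crt_realloc(p, 110);	/* 110 still fits in 120 usable bytes, */
	if (q != NULL)			/* so the same pointer comes back */
		p = q;
	__crt_free(p);			/* block goes back on nextf[4] */
}
#endif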