/*
 * util/alloc.c - memory allocation service.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains memory allocation functions.
 */

#include "config.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/data/packed_rrset.h"
#include "util/fptr_wlist.h"

/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE	16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT	48	/* for 65k threads, 2^48 rrsets per thr. */
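/* Illustrative note (added comment, not in the original source): with
 * THRNUM_SHIFT at 48, a 64-bit rrset id is composed as
 * (thread_num << 48) | counter, so the top 16 bits identify the owning
 * thread and the low 48 bits count entries handed out by that thread.
 * For example, thread 3 starts handing out ids at ((uint64_t)3 << 48) + 1;
 * id 0 is reserved as special, see alloc_init() below. */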
/** setup new special type */
static void
alloc_setup_special(alloc_special_t* t)
{
	memset(t, 0, sizeof(*t));
	lock_rw_init(&t->entry.lock);
	t->entry.key = t;
}

/** prealloc some entries in the cache. To minimize contention.
 * Result is 1 lock per alloc_max newly created entries.
 * @param alloc: the structure to fill up.
 */
static void
prealloc(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	int i;
	for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
		if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
			log_err("prealloc: out of memory");
			return;
		}
		alloc_setup_special(p);
		alloc_set_special_next(p, alloc->quar);
		alloc->quar = p;
		alloc->num_quar++;
	}
}

/** prealloc region blocks */
static void
prealloc_blocks(struct alloc_cache* alloc, size_t num)
{
	size_t i;
	struct regional* r;
	for(i=0; i<num; i++) {
		r = regional_create_custom(ALLOC_REG_SIZE);
		if(!r) {
			log_err("prealloc blocks: out of memory");
			return;
		}
		r->next = (char*)alloc->reg_list;
		alloc->reg_list = r;
		alloc->num_reg_blocks ++;
	}
}

void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
	int thread_num)
{
	memset(alloc, 0, sizeof(*alloc));
	alloc->super = super;
	alloc->thread_num = thread_num;
	alloc->next_id = (uint64_t)thread_num;	/* in steps, so that type */
	alloc->next_id <<= THRNUM_SHIFT;	/* of *_id is used. */
	alloc->last_id = 1;			/* so no 64bit constants, */
	alloc->last_id <<= THRNUM_SHIFT;	/* or implicit 'int' ops. */
	alloc->last_id -= 1;			/* for compiler portability. */
	alloc->last_id |= alloc->next_id;
	alloc->next_id += 1;			/* because id=0 is special. */
	alloc->max_reg_blocks = 100;
	alloc->num_reg_blocks = 0;
	alloc->reg_list = NULL;
	alloc->cleanup = NULL;
	alloc->cleanup_arg = NULL;
	if(alloc->super)
		prealloc_blocks(alloc, alloc->max_reg_blocks);
	if(!alloc->super) {
		lock_quick_init(&alloc->lock);
		lock_protect(&alloc->lock, alloc, sizeof(*alloc));
	}
}
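/* Usage sketch (added comment; the caller-side variable names are
 * hypothetical).  A process typically creates one shared, locked superalloc
 * and one per-thread alloc that refers to it:
 *
 *	struct alloc_cache superalloc, worker_alloc;
 *	alloc_init(&superalloc, NULL, 0);           // shared cache, locked
 *	alloc_init(&worker_alloc, &superalloc, 1);  // thread 1, lock-free
 *	...
 *	alloc_clear(&worker_alloc);  // pushes cached entries into the super
 *	alloc_clear(&superalloc);    // frees everything
 */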
void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_t* p, *np;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		/* free */
		p = alloc->quar;
		while(p) {
			np = alloc_special_next(p);
			/* deinit special type */
			lock_rw_destroy(&p->entry.lock);
			free(p);
			p = np;
		}
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	r = alloc->reg_list;
	while(r) {
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}

uint64_t
alloc_get_id(struct alloc_cache* alloc)
{
	uint64_t id = alloc->next_id++;
	if(id == alloc->last_id) {
		log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
		fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup));
		(*alloc->cleanup)(alloc->cleanup_arg);

		/* start back at first number */	/* like in alloc_init*/
		alloc->next_id = (uint64_t)alloc->thread_num;
		alloc->next_id <<= THRNUM_SHIFT;	/* in steps for comp. */
		alloc->next_id += 1;			/* portability. */
		/* and generate new and safe id */
		id = alloc->next_id++;
	}
	return id;
}

alloc_special_t*
alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	log_assert(alloc);
	/* see if in local cache */
	if(alloc->quar) {
		p = alloc->quar;
		alloc->quar = alloc_special_next(p);
		alloc->num_quar--;
		p->id = alloc_get_id(alloc);
		return p;
	}
	/* see if in global cache */
	if(alloc->super) {
		/* could maybe grab alloc_max/2 entries in one go,
		 * but really, isn't that just as fast as this code? */
		lock_quick_lock(&alloc->super->lock);
		if((p = alloc->super->quar)) {
			alloc->super->quar = alloc_special_next(p);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(p) {
			p->id = alloc_get_id(alloc);
			return p;
		}
	}
	/* allocate new */
	prealloc(alloc);
	if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(p);
	p->id = alloc_get_id(alloc);
	return p;
}
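/* Note (added comment): alloc_special_obtain() serves from the thread-local
 * quarantine list first, then takes one entry from the shared super under a
 * single lock, and only when both are empty does it refill the thread cache
 * with ALLOC_SPECIAL_MAX fresh entries via prealloc() before mallocing the
 * entry it returns, so subsequent obtains avoid malloc and the super lock. */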
/** push mem and some more items to the super */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
{
	int i;
	alloc_special_t *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;

	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+alloc/2 deletes */
}

void
alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}

	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure */
		pushintosuper(alloc, mem);
		return;
	}

	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}

void
alloc_stats(struct alloc_cache* alloc)
{
	log_info("%salloc: %d in cache, %d blocks.", alloc->super?"":"sup",
		(int)alloc->num_quar, (int)alloc->num_reg_blocks);
}

size_t alloc_get_mem(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	size_t s = sizeof(*alloc);
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	s += sizeof(alloc_special_t) * alloc->num_quar;
	for(p = alloc->quar; p; p = alloc_special_next(p)) {
		s += lock_get_mem(&p->entry.lock);
	}
	s += alloc->num_reg_blocks * ALLOC_REG_SIZE;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
	return s;
}

struct regional*
alloc_reg_obtain(struct alloc_cache* alloc)
{
	if(alloc->num_reg_blocks > 0) {
		struct regional* r = alloc->reg_list;
		alloc->reg_list = (struct regional*)r->next;
		r->next = NULL;
		alloc->num_reg_blocks--;
		return r;
	}
	return regional_create_custom(ALLOC_REG_SIZE);
}

void
alloc_reg_release(struct alloc_cache* alloc, struct regional* r)
{
	if(alloc->num_reg_blocks >= alloc->max_reg_blocks) {
		regional_destroy(r);
		return;
	}
	if(!r) return;
	regional_free_all(r);
	log_assert(r->next == NULL);
	r->next = (char*)alloc->reg_list;
	alloc->reg_list = r;
	alloc->num_reg_blocks++;
}

void
alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*),
	void* arg)
{
	alloc->cleanup = cleanup;
	alloc->cleanup_arg = arg;
}
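/* Usage sketch (added comment; the variables shown are hypothetical).
 * Callers use the regional block cache per request: obtain a scratch
 * region, allocate from it with the regional_* functions, and hand it back
 * when the request is done:
 *
 *	struct regional* region = alloc_reg_obtain(&worker_alloc);
 *	void* scratch = regional_alloc(region, 128);
 *	... use the scratch memory ...
 *	alloc_reg_release(&worker_alloc, region);
 *
 * Up to max_reg_blocks released regions are kept on reg_list for reuse;
 * any beyond that are destroyed. */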
/** global debug value to keep track of total memory mallocs */
size_t unbound_mem_alloc = 0;
/** global debug value to keep track of total memory frees */
size_t unbound_mem_freed = 0;
#ifdef UNBOUND_ALLOC_STATS
/** special value to know if the memory is being tracked */
uint64_t mem_special = (uint64_t)0xfeed43327766abcdLL;
#ifdef malloc
#undef malloc
#endif
/** malloc with stats */
void *unbound_stat_malloc(size_t size)
{
	void* res;
	if(size == 0) size = 1;
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef calloc
#undef calloc
#endif
#ifndef INT_MAX
#define INT_MAX (((int)-1)>>1)
#endif
/** calloc with stats */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	s = (nmemb*size==0)?(size_t)1:nmemb*size;
	res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef free
#undef free
#endif
/** free with stats */
void unbound_stat_free(void *ptr)
{
	size_t s;
	if(!ptr) return;
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		free(ptr);
		return;
	}
	ptr-=16;
	memcpy(&s, ptr, sizeof(s));
	log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
	memset(ptr+8, 0, 8);
	unbound_mem_freed += s;
	free(ptr);
}
#ifdef realloc
#undef realloc
#endif
/** realloc with stats */
void *unbound_stat_realloc(void *ptr, size_t size)
{
	size_t cursz;
	void* res;
	if(!ptr) return unbound_stat_malloc(size);
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		return realloc(ptr, size);
	}
	if(size==0) {
		unbound_stat_free(ptr);
		return NULL;
	}
	ptr -= 16;
	memcpy(&cursz, ptr, sizeof(cursz));
	if(cursz == size) {
		/* nothing changes; return the user data pointer, which
		 * sits 16 bytes past the tracking header. */
		return ptr+16;
	}
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	unbound_mem_freed += cursz;
	log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz);
	if(cursz > size) {
		memcpy(res+16, ptr+16, size);
	} else if(size > cursz) {
		memcpy(res+16, ptr+16, cursz);
	}
	memset(ptr+8, 0, 8);
	free(ptr);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
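/* Layout note (added comment): the stats wrappers above prepend a 16-byte
 * header to every tracked allocation and return a pointer just past it:
 *
 *	[ size_t size (8 bytes) ][ mem_special (8 bytes) ][ user data ... ]
 *	^ raw malloc() result                             ^ returned pointer
 *
 * unbound_stat_free() and unbound_stat_realloc() look 8 bytes before the
 * user pointer for mem_special to recognize tracked memory; anything else
 * is passed straight to the plain libc functions.  This layout assumes
 * sizeof(size_t) fits in the first 8 bytes. */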
/** log to file where alloc was done */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
	const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}

/** log to file where alloc was done */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned) nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}

/** log to file where free was done */
void unbound_stat_free_log(void *ptr, const char* file, int line,
	const char* func)
{
	if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
		size_t s;
		memcpy(&s, ptr-16, sizeof(s));
		log_info("%s:%d %s free(%p) size %u",
			file, line, func, ptr, (unsigned)s);
	} else
		log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
	unbound_stat_free(ptr);
}

/** log to file where alloc was done */
void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s realloc(%p, %u)", file, line, func,
		ptr, (unsigned)size);
	return unbound_stat_realloc(ptr, size);
}

#endif /* UNBOUND_ALLOC_STATS */
#ifdef UNBOUND_ALLOC_LITE
#undef malloc
#undef calloc
#undef free
#undef realloc
/** length of prefix and suffix */
static size_t lite_pad = 16;
/** prefix value to check */
static char* lite_pre = "checkfront123456";
/** suffix value to check */
static char* lite_post= "checkafter123456";

void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
	const char* func)
{
	/* [prefix .. len .. actual data .. suffix] */
	void* res = malloc(size+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &size, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0x1a, size); /* init the memory */
	memmove(res+lite_pad+size+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}

void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	size_t req;
	void* res;
	if(nmemb != 0 && INT_MAX/nmemb < size)
		return NULL; /* integer overflow check */
	req = nmemb * size;
	res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req);
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}
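/* Layout note (added comment): the "lite" wrappers bracket every allocation
 * with guard bytes and a stored length, and return a pointer to the data:
 *
 *	[ lite_pre (16) ][ size_t len (8) ][ data ... ][ lite_post (16) ]
 *
 * The free and realloc routines below verify both guard strings and call
 * fatal_exit() on a mismatch, which catches buffer overruns and underruns
 * on tracked allocations. */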
void unbound_stat_free_lite(void *ptr, const char* file, int line,
	const char* func)
{
	void* real;
	size_t orig = 0;
	if(!ptr) return;
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("free(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("free(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
}

void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	/* always free and realloc (no growing) */
	void* real, *newa;
	size_t orig = 0;
	if(!ptr) {
		/* like malloc() */
		return unbound_stat_malloc_lite(size, file, line, func);
	}
	if(!size) {
		/* like free() */
		unbound_stat_free_lite(ptr, file, line, func);
		return NULL;
	}
	/* change allocation size and copy */
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("realloc(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("realloc(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	/* new alloc and copy over */
	newa = unbound_stat_malloc_lite(size, file, line, func);
	if(!newa)
		return NULL;
	if(orig < size)
		memmove(newa, ptr, orig);
	else	memmove(newa, ptr, size);
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
	return newa;
}

char* unbound_strdup_lite(const char* s, const char* file, int line,
	const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t l = strlen(s)+1;
	char* n = (char*)unbound_stat_malloc_lite(l, file, line, func);
	if(!n) return NULL;
	memmove(n, s, l);
	return n;
}

char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}

#undef sldns_pkt2wire
sldns_status unbound_lite_pkt2wire(uint8_t **dest, const sldns_pkt *p,
	size_t *size)
{
	uint8_t* md = NULL;
	size_t ms = 0;
	sldns_status s = sldns_pkt2wire(&md, p, &ms);
	if(md) {
		*dest = unbound_stat_malloc_lite(ms, __FILE__, __LINE__,
			__func__);
		*size = ms;
		if(!*dest) { free(md); return LDNS_STATUS_MEM_ERR; }
		memcpy(*dest, md, ms);
		free(md);
	} else {
		*dest = NULL;
		*size = 0;
	}
	return s;
}

#undef i2d_DSA_SIG
int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig)
{
	unsigned char* n = NULL;
	int r= i2d_DSA_SIG(dsasig, &n);
	if(n) {
		*sig = unbound_stat_malloc_lite((size_t)r, __FILE__, __LINE__,
			__func__);
		if(!*sig) return -1;
		memcpy(*sig, n, (size_t)r);
		free(n);
		return r;
	}
	*sig = NULL;
	return r;
}

#endif /* UNBOUND_ALLOC_LITE */