/*
 * util/alloc.c - memory allocation service.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains memory allocation functions.
 */

#include "config.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/data/packed_rrset.h"
#include "util/fptr_wlist.h"

/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE 16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT 48	/* for 65k threads, 2^48 rrsets per thr. */

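/*
 * Illustration (added for clarity) of how THRNUM_SHIFT carves up the
 * 64-bit rrset ids handed out by alloc_get_id(), see alloc_init() below:
 *
 *	 63            48 47                              0
 *	+----------------+---------------------------------+
 *	| thread number  | per-thread sequence counter     |
 *	+----------------+---------------------------------+
 *
 * Each thread thus owns 2^48 ids; the counter starts one past the thread's
 * base value because id=0 is treated as special.
 */
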
/** setup new special type */
static void
alloc_setup_special(alloc_special_t* t)
{
	memset(t, 0, sizeof(*t));
	lock_rw_init(&t->entry.lock);
	t->entry.key = t;
}

/** prealloc some entries in the cache, to minimize contention.
 * Result is 1 lock per ALLOC_SPECIAL_MAX newly created entries.
 * @param alloc: the structure to fill up.
 */
static void
prealloc(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	int i;
	for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
		if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
			log_err("prealloc: out of memory");
			return;
		}
		alloc_setup_special(p);
		alloc_set_special_next(p, alloc->quar);
		alloc->quar = p;
		alloc->num_quar++;
	}
}

/** prealloc region blocks */
static void
prealloc_blocks(struct alloc_cache* alloc, size_t num)
{
	size_t i;
	struct regional* r;
	for(i=0; i<num; i++) {
		r = regional_create_custom(ALLOC_REG_SIZE);
		if(!r) {
			log_err("prealloc blocks: out of memory");
			return;
		}
		r->next = (char*)alloc->reg_list;
		alloc->reg_list = r;
		alloc->num_reg_blocks++;
	}
}

void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
	int thread_num)
{
	memset(alloc, 0, sizeof(*alloc));
	alloc->super = super;
	alloc->thread_num = thread_num;
	alloc->next_id = (uint64_t)thread_num;	/* in steps, so that type */
	alloc->next_id <<= THRNUM_SHIFT;	/* of *_id is used. */
	alloc->last_id = 1;			/* so no 64bit constants, */
	alloc->last_id <<= THRNUM_SHIFT;	/* or implicit 'int' ops. */
	alloc->last_id -= 1;			/* for compiler portability. */
	alloc->last_id |= alloc->next_id;
	alloc->next_id += 1;			/* because id=0 is special. */
	alloc->max_reg_blocks = 100;
	alloc->num_reg_blocks = 0;
	alloc->reg_list = NULL;
	alloc->cleanup = NULL;
	alloc->cleanup_arg = NULL;
	if(alloc->super)
		prealloc_blocks(alloc, alloc->max_reg_blocks);
	if(!alloc->super) {
		lock_quick_init(&alloc->lock);
		lock_protect(&alloc->lock, alloc, sizeof(*alloc));
	}
}

void
alloc_clear(struct alloc_cache* alloc)
{
	alloc_special_t* p, *np;
	struct regional* r, *nr;
	if(!alloc)
		return;
	if(!alloc->super) {
		lock_quick_destroy(&alloc->lock);
	}
	if(alloc->super && alloc->quar) {
		/* push entire list into super */
		p = alloc->quar;
		while(alloc_special_next(p)) /* find last */
			p = alloc_special_next(p);
		lock_quick_lock(&alloc->super->lock);
		alloc_set_special_next(p, alloc->super->quar);
		alloc->super->quar = alloc->quar;
		alloc->super->num_quar += alloc->num_quar;
		lock_quick_unlock(&alloc->super->lock);
	} else {
		/* free */
		p = alloc->quar;
		while(p) {
			np = alloc_special_next(p);
			/* deinit special type */
			lock_rw_destroy(&p->entry.lock);
			free(p);
			p = np;
		}
	}
	alloc->quar = 0;
	alloc->num_quar = 0;
	r = alloc->reg_list;
	while(r) {
		nr = (struct regional*)r->next;
		free(r);
		r = nr;
	}
	alloc->reg_list = NULL;
	alloc->num_reg_blocks = 0;
}

uint64_t
alloc_get_id(struct alloc_cache* alloc)
{
	uint64_t id = alloc->next_id++;
	if(id == alloc->last_id) {
		log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
		fptr_ok(fptr_whitelist_alloc_cleanup(alloc->cleanup));
		(*alloc->cleanup)(alloc->cleanup_arg);

		/* start back at first number */	/* like in alloc_init*/
		alloc->next_id = (uint64_t)alloc->thread_num;
		alloc->next_id <<= THRNUM_SHIFT;	/* in steps for comp. */
		alloc->next_id += 1;			/* portability. */
		/* and generate new and safe id */
		id = alloc->next_id++;
	}
	return id;
}

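/*
 * Illustrative usage sketch of the two-level special (rrset) cache below;
 * worker_alloc and super_alloc are hypothetical per-thread and shared
 * caches, set up with alloc_init(&worker_alloc, &super_alloc, thread_num):
 *
 *	alloc_special_t* e = alloc_special_obtain(&worker_alloc);
 *	... use e, identified by its unique e->id ...
 *	alloc_special_release(&worker_alloc, e);
 *
 * Releases first fill the local cache; once it holds ALLOC_SPECIAL_MAX
 * entries, half of them are pushed to the super cache in one batch, so the
 * super lock is taken only once per ALLOC_SPECIAL_MAX/2 released entries.
 */
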
alloc_special_t*
alloc_special_obtain(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	log_assert(alloc);
	/* see if in local cache */
	if(alloc->quar) {
		p = alloc->quar;
		alloc->quar = alloc_special_next(p);
		alloc->num_quar--;
		p->id = alloc_get_id(alloc);
		return p;
	}
	/* see if in global cache */
	if(alloc->super) {
		/* could maybe grab ALLOC_SPECIAL_MAX/2 entries in one go,
		 * but really, isn't that just as fast as this code? */
		lock_quick_lock(&alloc->super->lock);
		if((p = alloc->super->quar)) {
			alloc->super->quar = alloc_special_next(p);
			alloc->super->num_quar--;
		}
		lock_quick_unlock(&alloc->super->lock);
		if(p) {
			p->id = alloc_get_id(alloc);
			return p;
		}
	}
	/* allocate new */
	prealloc(alloc);
	if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t)))) {
		log_err("alloc_special_obtain: out of memory");
		return NULL;
	}
	alloc_setup_special(p);
	p->id = alloc_get_id(alloc);
	return p;
}

/** push mem and some more items to the super */
static void
pushintosuper(struct alloc_cache* alloc, alloc_special_t* mem)
{
	int i;
	alloc_special_t *p = alloc->quar;
	log_assert(p);
	log_assert(alloc && alloc->super &&
		alloc->num_quar >= ALLOC_SPECIAL_MAX);
	/* push ALLOC_SPECIAL_MAX/2 after mem */
	alloc_set_special_next(mem, alloc->quar);
	for(i=1; i<ALLOC_SPECIAL_MAX/2; i++) {
		p = alloc_special_next(p);
	}
	alloc->quar = alloc_special_next(p);
	alloc->num_quar -= ALLOC_SPECIAL_MAX/2;

	/* dump mem+list into the super quar list */
	lock_quick_lock(&alloc->super->lock);
	alloc_set_special_next(p, alloc->super->quar);
	alloc->super->quar = mem;
	alloc->super->num_quar += ALLOC_SPECIAL_MAX/2 + 1;
	lock_quick_unlock(&alloc->super->lock);
	/* so 1 lock per mem+ALLOC_SPECIAL_MAX/2 deletes */
}

void
alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem)
{
	log_assert(alloc);
	if(!mem)
		return;
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}

	alloc_special_clean(mem);
	if(alloc->super && alloc->num_quar >= ALLOC_SPECIAL_MAX) {
		/* push it to the super structure */
		pushintosuper(alloc, mem);
		return;
	}

	alloc_set_special_next(mem, alloc->quar);
	alloc->quar = mem;
	alloc->num_quar++;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
}

void
alloc_stats(struct alloc_cache* alloc)
{
	log_info("%salloc: %d in cache, %d blocks.", alloc->super?"":"sup",
		(int)alloc->num_quar, (int)alloc->num_reg_blocks);
}

size_t alloc_get_mem(struct alloc_cache* alloc)
{
	alloc_special_t* p;
	size_t s = sizeof(*alloc);
	if(!alloc->super) {
		lock_quick_lock(&alloc->lock); /* superalloc needs locking */
	}
	s += sizeof(alloc_special_t) * alloc->num_quar;
	for(p = alloc->quar; p; p = alloc_special_next(p)) {
		s += lock_get_mem(&p->entry.lock);
	}
	s += alloc->num_reg_blocks * ALLOC_REG_SIZE;
	if(!alloc->super) {
		lock_quick_unlock(&alloc->lock);
	}
	return s;
}

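/*
 * Overview of the region block cache below: alloc_reg_obtain() hands out a
 * cached regional of ALLOC_REG_SIZE bytes from reg_list if one is available,
 * otherwise it creates a new one; alloc_reg_release() wipes the region and
 * caches it again, up to max_reg_blocks (100, set in alloc_init), beyond
 * which regions are destroyed outright.
 */
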
struct regional*
alloc_reg_obtain(struct alloc_cache* alloc)
{
	if(alloc->num_reg_blocks > 0) {
		struct regional* r = alloc->reg_list;
		alloc->reg_list = (struct regional*)r->next;
		r->next = NULL;
		alloc->num_reg_blocks--;
		return r;
	}
	return regional_create_custom(ALLOC_REG_SIZE);
}

void
alloc_reg_release(struct alloc_cache* alloc, struct regional* r)
{
	if(alloc->num_reg_blocks >= alloc->max_reg_blocks) {
		regional_destroy(r);
		return;
	}
	if(!r) return;
	regional_free_all(r);
	log_assert(r->next == NULL);
	r->next = (char*)alloc->reg_list;
	alloc->reg_list = r;
	alloc->num_reg_blocks++;
}

void
alloc_set_id_cleanup(struct alloc_cache* alloc, void (*cleanup)(void*),
	void* arg)
{
	alloc->cleanup = cleanup;
	alloc->cleanup_arg = arg;
}

/** global debug value to keep track of total memory mallocs */
size_t unbound_mem_alloc = 0;
/** global debug value to keep track of total memory frees */
size_t unbound_mem_freed = 0;
#ifdef UNBOUND_ALLOC_STATS
/** special value to know if the memory is being tracked */
uint64_t mem_special = (uint64_t)0xfeed43327766abcdLL;
#ifdef malloc
#undef malloc
#endif

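/*
 * Layout of a tracked allocation, sketched from the code below: 16 extra
 * header bytes are placed in front of the user data,
 *
 *	[ size_t size | uint64_t mem_special | user data ... ]
 *	  bytes 0..7    bytes 8..15            returned pointer
 *
 * The mem_special marker lets the free/realloc wrappers recognize tracked
 * pointers and pass untracked ones straight to the system allocator.
 */
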
/** malloc with stats */
void *unbound_stat_malloc(size_t size)
{
	void* res;
	if(size == 0) size = 1;
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef calloc
#undef calloc
#endif
/** calloc with stats */
void *unbound_stat_calloc(size_t nmemb, size_t size)
{
	size_t s = (nmemb*size==0)?(size_t)1:nmemb*size;
	void* res = calloc(1, s+16);
	if(!res) return NULL;
	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
	unbound_mem_alloc += s;
	memcpy(res, &s, sizeof(s));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}
#ifdef free
#undef free
#endif
/** free with stats */
void unbound_stat_free(void *ptr)
{
	size_t s;
	if(!ptr) return;
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		free(ptr);
		return;
	}
	ptr-=16;
	memcpy(&s, ptr, sizeof(s));
	log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
	memset(ptr+8, 0, 8);
	unbound_mem_freed += s;
	free(ptr);
}
#ifdef realloc
#undef realloc
#endif
/** realloc with stats */
void *unbound_stat_realloc(void *ptr, size_t size)
{
	size_t cursz;
	void* res;
	if(!ptr) return unbound_stat_malloc(size);
	if(memcmp(ptr-8, &mem_special, sizeof(mem_special)) != 0) {
		return realloc(ptr, size);
	}
	if(size==0) {
		unbound_stat_free(ptr);
		return NULL;
	}
	ptr -= 16;
	memcpy(&cursz, ptr, sizeof(cursz));
	if(cursz == size) {
		/* nothing changes, return the user pointer past the header */
		return ptr+16;
	}
	res = malloc(size+16);
	if(!res) return NULL;
	unbound_mem_alloc += size;
	unbound_mem_freed += cursz;
	log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz);
	if(cursz > size) {
		memcpy(res+16, ptr+16, size);
	} else if(size > cursz) {
		memcpy(res+16, ptr+16, cursz);
	}
	memset(ptr+8, 0, 8);
	free(ptr);
	memcpy(res, &size, sizeof(size));
	memcpy(res+8, &mem_special, sizeof(mem_special));
	return res+16;
}

/** log to file where alloc was done */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
	const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}

/** log to file where alloc was done */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned)nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}

/** log to file where free was done */
void unbound_stat_free_log(void *ptr, const char* file, int line,
	const char* func)
{
	if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
		size_t s;
		memcpy(&s, ptr-16, sizeof(s));
		log_info("%s:%d %s free(%p) size %u",
			file, line, func, ptr, (unsigned)s);
	} else
		log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
	unbound_stat_free(ptr);
}

/** log to file where alloc was done */
void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s realloc(%p, %u)", file, line, func,
		ptr, (unsigned)size);
	return unbound_stat_realloc(ptr, size);
}

#endif /* UNBOUND_ALLOC_STATS */
#ifdef UNBOUND_ALLOC_LITE
#undef malloc
#undef calloc
#undef free
#undef realloc
/** length of prefix and suffix */
static size_t lite_pad = 16;
/** prefix value to check */
static char* lite_pre = "checkfront123456";
/** suffix value to check */
static char* lite_post= "checkafter123456";

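/*
 * Byte layout used by the lite checks, sketched from the code below:
 *
 *	[ lite_pre (16) | size_t size | user data | lite_post (16) ]
 *
 * The prefix and suffix strings are verified on free and realloc, and the
 * whole region is overwritten with 0xdd when released, to help catch buffer
 * overruns and use-after-free.
 */
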
void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
	const char* func)
{
	/*  [prefix .. len .. actual data .. suffix] */
	void* res = malloc(size+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &size, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0x1a, size); /* init the memory */
	memmove(res+lite_pad+size+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}

void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	size_t req = nmemb * size;
	void* res = malloc(req+lite_pad*2+sizeof(size_t));
	if(!res) return NULL;
	memmove(res, lite_pre, lite_pad);
	memmove(res+lite_pad, &req, sizeof(size_t));
	memset(res+lite_pad+sizeof(size_t), 0, req);
	memmove(res+lite_pad+req+sizeof(size_t), lite_post, lite_pad);
	return res+lite_pad+sizeof(size_t);
}

void unbound_stat_free_lite(void *ptr, const char* file, int line,
	const char* func)
{
	void* real;
	size_t orig = 0;
	if(!ptr) return;
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("free(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("free(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
}

void *unbound_stat_realloc_lite(void *ptr, size_t size, const char* file,
	int line, const char* func)
{
	/* always free and realloc (no growing) */
	void* real, *newa;
	size_t orig = 0;
	if(!ptr) {
		/* like malloc() */
		return unbound_stat_malloc_lite(size, file, line, func);
	}
	if(!size) {
		/* like free() */
		unbound_stat_free_lite(ptr, file, line, func);
		return NULL;
	}
	/* change allocation size and copy */
	real = ptr-lite_pad-sizeof(size_t);
	if(memcmp(real, lite_pre, lite_pad) != 0) {
		log_err("realloc(): prefix failed %s:%d %s", file, line, func);
		log_hex("prefix here", real, lite_pad);
		log_hex(" should be", lite_pre, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	memmove(&orig, real+lite_pad, sizeof(size_t));
	if(memcmp(real+lite_pad+orig+sizeof(size_t), lite_post, lite_pad)!=0){
		log_err("realloc(): suffix failed %s:%d %s", file, line, func);
		log_err("alloc size is %d", (int)orig);
		log_hex("suffix here", real+lite_pad+orig+sizeof(size_t),
			lite_pad);
		log_hex(" should be", lite_post, lite_pad);
		fatal_exit("alloc assertion failed");
	}
	/* new alloc and copy over */
	newa = unbound_stat_malloc_lite(size, file, line, func);
	if(!newa)
		return NULL;
	if(orig < size)
		memmove(newa, ptr, orig);
	else	memmove(newa, ptr, size);
	memset(real, 0xdd, orig+lite_pad*2+sizeof(size_t)); /* mark it */
	free(real);
	return newa;
}

char* unbound_strdup_lite(const char* s, const char* file, int line,
	const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t l = strlen(s)+1;
	char* n = (char*)unbound_stat_malloc_lite(l, file, line, func);
	if(!n) return NULL;
	memmove(n, s, l);
	return n;
}

char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}

#undef ldns_pkt2wire
ldns_status unbound_lite_pkt2wire(uint8_t **dest, const ldns_pkt *p,
	size_t *size)
{
	uint8_t* md = NULL;
	size_t ms = 0;
	ldns_status s = ldns_pkt2wire(&md, p, &ms);
	if(md) {
		*dest = unbound_stat_malloc_lite(ms, __FILE__, __LINE__,
			__func__);
		*size = ms;
		if(!*dest) { free(md); return LDNS_STATUS_MEM_ERR; }
		memcpy(*dest, md, ms);
		free(md);
	} else {
		*dest = NULL;
		*size = 0;
	}
	return s;
}

#undef i2d_DSA_SIG
int unbound_lite_i2d_DSA_SIG(DSA_SIG* dsasig, unsigned char** sig)
{
	unsigned char* n = NULL;
	int r= i2d_DSA_SIG(dsasig, &n);
	if(n) {
		*sig = unbound_stat_malloc_lite((size_t)r, __FILE__, __LINE__,
			__func__);
		if(!*sig) {
			free(n); /* do not leak the DER buffer on failure */
			return -1;
		}
		memcpy(*sig, n, (size_t)r);
		free(n);
		return r;
	}
	*sig = NULL;
	return r;
}

#endif /* UNBOUND_ALLOC_LITE */