1 /* 2 * util/storage/lruhash.c - hashtable, hash function, LRU keeping. 3 * 4 * Copyright (c) 2007, NLnet Labs. All rights reserved. 5 * 6 * This software is open source. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * Redistributions of source code must retain the above copyright notice, 13 * this list of conditions and the following disclaimer. 14 * 15 * Redistributions in binary form must reproduce the above copyright notice, 16 * this list of conditions and the following disclaimer in the documentation 17 * and/or other materials provided with the distribution. 18 * 19 * Neither the name of the NLNET LABS nor the names of its contributors may 20 * be used to endorse or promote products derived from this software without 21 * specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36 /** 37 * \file 38 * 39 * This file contains a hashtable with LRU keeping of entries. 40 * 41 */ 42 43 #include "config.h" 44 #include "util/storage/lruhash.h" 45 #include "util/fptr_wlist.h" 46 47 void 48 bin_init(struct lruhash_bin* array, size_t size) 49 { 50 size_t i; 51 #ifdef THREADS_DISABLED 52 (void)array; 53 #endif 54 for(i=0; i<size; i++) { 55 lock_quick_init(&array[i].lock); 56 lock_protect(&array[i].lock, &array[i], 57 sizeof(struct lruhash_bin)); 58 } 59 } 60 61 struct lruhash* 62 lruhash_create(size_t start_size, size_t maxmem, 63 lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc, 64 lruhash_delkeyfunc_type delkeyfunc, 65 lruhash_deldatafunc_type deldatafunc, void* arg) 66 { 67 struct lruhash* table = (struct lruhash*)calloc(1, 68 sizeof(struct lruhash)); 69 if(!table) 70 return NULL; 71 lock_quick_init(&table->lock); 72 table->sizefunc = sizefunc; 73 table->compfunc = compfunc; 74 table->delkeyfunc = delkeyfunc; 75 table->deldatafunc = deldatafunc; 76 table->cb_arg = arg; 77 table->size = start_size; 78 table->size_mask = (int)(start_size-1); 79 table->lru_start = NULL; 80 table->lru_end = NULL; 81 table->num = 0; 82 table->space_used = 0; 83 table->space_max = maxmem; 84 table->max_collisions = 0; 85 table->array = calloc(table->size, sizeof(struct lruhash_bin)); 86 if(!table->array) { 87 lock_quick_destroy(&table->lock); 88 free(table); 89 return NULL; 90 } 91 bin_init(table->array, table->size); 92 lock_protect(&table->lock, table, sizeof(*table)); 93 lock_protect(&table->lock, table->array, 94 table->size*sizeof(struct lruhash_bin)); 95 return table; 96 } 97 98 void 99 bin_delete(struct lruhash* table, struct lruhash_bin* bin) 100 { 101 struct lruhash_entry* p, *np; 102 void *d; 103 if(!bin) 104 return; 105 lock_quick_destroy(&bin->lock); 106 p = bin->overflow_list; 107 bin->overflow_list = NULL; 108 while(p) { 109 np = p->overflow_next; 110 d = p->data; 111 (*table->delkeyfunc)(p->key, table->cb_arg); 112 (*table->deldatafunc)(d, table->cb_arg); 113 p = np; 114 } 115 } 116 117 void 118 bin_split(struct lruhash* table, struct lruhash_bin* newa, 119 int newmask) 120 { 121 size_t i; 122 struct lruhash_entry *p, *np; 123 struct lruhash_bin* newbin; 124 /* move entries to new table. Notice that since hash x is mapped to 125 * bin x & mask, and new mask uses one more bit, so all entries in 126 * one bin will go into the old bin or bin | newbit */ 127 #ifndef THREADS_DISABLED 128 int newbit = newmask - table->size_mask; 129 #endif 130 /* so, really, this task could also be threaded, per bin. */ 131 /* LRU list is not changed */ 132 for(i=0; i<table->size; i++) 133 { 134 lock_quick_lock(&table->array[i].lock); 135 p = table->array[i].overflow_list; 136 /* lock both destination bins */ 137 lock_quick_lock(&newa[i].lock); 138 lock_quick_lock(&newa[newbit|i].lock); 139 while(p) { 140 np = p->overflow_next; 141 /* link into correct new bin */ 142 newbin = &newa[p->hash & newmask]; 143 p->overflow_next = newbin->overflow_list; 144 newbin->overflow_list = p; 145 p=np; 146 } 147 lock_quick_unlock(&newa[i].lock); 148 lock_quick_unlock(&newa[newbit|i].lock); 149 lock_quick_unlock(&table->array[i].lock); 150 } 151 } 152 153 void 154 lruhash_delete(struct lruhash* table) 155 { 156 size_t i; 157 if(!table) 158 return; 159 /* delete lock on hashtable to force check its OK */ 160 lock_quick_destroy(&table->lock); 161 for(i=0; i<table->size; i++) 162 bin_delete(table, &table->array[i]); 163 free(table->array); 164 free(table); 165 } 166 167 void 168 bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry) 169 { 170 struct lruhash_entry* p = bin->overflow_list; 171 struct lruhash_entry** prevp = &bin->overflow_list; 172 while(p) { 173 if(p == entry) { 174 *prevp = p->overflow_next; 175 return; 176 } 177 prevp = &p->overflow_next; 178 p = p->overflow_next; 179 } 180 } 181 182 void 183 reclaim_space(struct lruhash* table, struct lruhash_entry** list) 184 { 185 struct lruhash_entry* d; 186 struct lruhash_bin* bin; 187 log_assert(table); 188 /* does not delete MRU entry, so table will not be empty. */ 189 while(table->num > 1 && table->space_used > table->space_max) { 190 /* notice that since we hold the hashtable lock, nobody 191 can change the lru chain. So it cannot be deleted underneath 192 us. We still need the hashbin and entry write lock to make 193 sure we flush all users away from the entry. 194 which is unlikely, since it is LRU, if someone got a rdlock 195 it would be moved to front, but to be sure. */ 196 d = table->lru_end; 197 /* specialised, delete from end of double linked list, 198 and we know num>1, so there is a previous lru entry. */ 199 log_assert(d && d->lru_prev); 200 table->lru_end = d->lru_prev; 201 d->lru_prev->lru_next = NULL; 202 /* schedule entry for deletion */ 203 bin = &table->array[d->hash & table->size_mask]; 204 table->num --; 205 lock_quick_lock(&bin->lock); 206 bin_overflow_remove(bin, d); 207 d->overflow_next = *list; 208 *list = d; 209 lock_rw_wrlock(&d->lock); 210 table->space_used -= table->sizefunc(d->key, d->data); 211 if(table->markdelfunc) 212 (*table->markdelfunc)(d->key); 213 lock_rw_unlock(&d->lock); 214 lock_quick_unlock(&bin->lock); 215 } 216 } 217 218 struct lruhash_entry* 219 bin_find_entry(struct lruhash* table, 220 struct lruhash_bin* bin, hashvalue_type hash, void* key, size_t* collisions) 221 { 222 size_t c = 0; 223 struct lruhash_entry* p = bin->overflow_list; 224 while(p) { 225 if(p->hash == hash && table->compfunc(p->key, key) == 0) 226 break; 227 c++; 228 p = p->overflow_next; 229 } 230 if (collisions != NULL) 231 *collisions = c; 232 return p; 233 } 234 235 void 236 table_grow(struct lruhash* table) 237 { 238 struct lruhash_bin* newa; 239 int newmask; 240 size_t i; 241 if(table->size_mask == (int)(((size_t)-1)>>1)) { 242 log_err("hash array malloc: size_t too small"); 243 return; 244 } 245 /* try to allocate new array, if not fail */ 246 newa = calloc(table->size*2, sizeof(struct lruhash_bin)); 247 if(!newa) { 248 log_err("hash grow: malloc failed"); 249 /* continue with smaller array. Though its slower. */ 250 return; 251 } 252 bin_init(newa, table->size*2); 253 newmask = (table->size_mask << 1) | 1; 254 bin_split(table, newa, newmask); 255 /* delete the old bins */ 256 lock_unprotect(&table->lock, table->array); 257 for(i=0; i<table->size; i++) { 258 lock_quick_destroy(&table->array[i].lock); 259 } 260 free(table->array); 261 262 table->size *= 2; 263 table->size_mask = newmask; 264 table->array = newa; 265 lock_protect(&table->lock, table->array, 266 table->size*sizeof(struct lruhash_bin)); 267 return; 268 } 269 270 void 271 lru_front(struct lruhash* table, struct lruhash_entry* entry) 272 { 273 entry->lru_prev = NULL; 274 entry->lru_next = table->lru_start; 275 if(!table->lru_start) 276 table->lru_end = entry; 277 else table->lru_start->lru_prev = entry; 278 table->lru_start = entry; 279 } 280 281 void 282 lru_remove(struct lruhash* table, struct lruhash_entry* entry) 283 { 284 if(entry->lru_prev) 285 entry->lru_prev->lru_next = entry->lru_next; 286 else table->lru_start = entry->lru_next; 287 if(entry->lru_next) 288 entry->lru_next->lru_prev = entry->lru_prev; 289 else table->lru_end = entry->lru_prev; 290 } 291 292 void 293 lru_touch(struct lruhash* table, struct lruhash_entry* entry) 294 { 295 log_assert(table && entry); 296 if(entry == table->lru_start) 297 return; /* nothing to do */ 298 /* remove from current lru position */ 299 lru_remove(table, entry); 300 /* add at front */ 301 lru_front(table, entry); 302 } 303 304 void 305 lruhash_insert(struct lruhash* table, hashvalue_type hash, 306 struct lruhash_entry* entry, void* data, void* cb_arg) 307 { 308 struct lruhash_bin* bin; 309 struct lruhash_entry* found, *reclaimlist=NULL; 310 size_t need_size; 311 size_t collisions; 312 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc)); 313 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); 314 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); 315 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); 316 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); 317 need_size = table->sizefunc(entry->key, data); 318 if(cb_arg == NULL) cb_arg = table->cb_arg; 319 320 /* find bin */ 321 lock_quick_lock(&table->lock); 322 bin = &table->array[hash & table->size_mask]; 323 lock_quick_lock(&bin->lock); 324 325 /* see if entry exists already */ 326 if(!(found=bin_find_entry(table, bin, hash, entry->key, &collisions))) { 327 /* if not: add to bin */ 328 entry->overflow_next = bin->overflow_list; 329 bin->overflow_list = entry; 330 lru_front(table, entry); 331 table->num++; 332 if (table->max_collisions < collisions) 333 table->max_collisions = collisions; 334 table->space_used += need_size; 335 } else { 336 /* if so: update data - needs a writelock */ 337 table->space_used += need_size - 338 (*table->sizefunc)(found->key, found->data); 339 (*table->delkeyfunc)(entry->key, cb_arg); 340 lru_touch(table, found); 341 lock_rw_wrlock(&found->lock); 342 (*table->deldatafunc)(found->data, cb_arg); 343 found->data = data; 344 lock_rw_unlock(&found->lock); 345 } 346 lock_quick_unlock(&bin->lock); 347 if(table->space_used > table->space_max) 348 reclaim_space(table, &reclaimlist); 349 if(table->num >= table->size) 350 table_grow(table); 351 lock_quick_unlock(&table->lock); 352 353 /* finish reclaim if any (outside of critical region) */ 354 while(reclaimlist) { 355 struct lruhash_entry* n = reclaimlist->overflow_next; 356 void* d = reclaimlist->data; 357 (*table->delkeyfunc)(reclaimlist->key, cb_arg); 358 (*table->deldatafunc)(d, cb_arg); 359 reclaimlist = n; 360 } 361 } 362 363 struct lruhash_entry* 364 lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr) 365 { 366 struct lruhash_entry* entry; 367 struct lruhash_bin* bin; 368 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); 369 370 lock_quick_lock(&table->lock); 371 bin = &table->array[hash & table->size_mask]; 372 lock_quick_lock(&bin->lock); 373 if((entry=bin_find_entry(table, bin, hash, key, NULL))) 374 lru_touch(table, entry); 375 lock_quick_unlock(&table->lock); 376 377 if(entry) { 378 if(wr) { lock_rw_wrlock(&entry->lock); } 379 else { lock_rw_rdlock(&entry->lock); } 380 } 381 lock_quick_unlock(&bin->lock); 382 return entry; 383 } 384 385 void 386 lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key) 387 { 388 struct lruhash_entry* entry; 389 struct lruhash_bin* bin; 390 void *d; 391 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc)); 392 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); 393 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); 394 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); 395 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); 396 397 lock_quick_lock(&table->lock); 398 bin = &table->array[hash & table->size_mask]; 399 lock_quick_lock(&bin->lock); 400 if((entry=bin_find_entry(table, bin, hash, key, NULL))) { 401 bin_overflow_remove(bin, entry); 402 lru_remove(table, entry); 403 } else { 404 lock_quick_unlock(&table->lock); 405 lock_quick_unlock(&bin->lock); 406 return; 407 } 408 table->num--; 409 table->space_used -= (*table->sizefunc)(entry->key, entry->data); 410 lock_rw_wrlock(&entry->lock); 411 if(table->markdelfunc) 412 (*table->markdelfunc)(entry->key); 413 lock_rw_unlock(&entry->lock); 414 lock_quick_unlock(&bin->lock); 415 lock_quick_unlock(&table->lock); 416 /* finish removal */ 417 d = entry->data; 418 (*table->delkeyfunc)(entry->key, table->cb_arg); 419 (*table->deldatafunc)(d, table->cb_arg); 420 } 421 422 /** clear bin, respecting locks, does not do space, LRU */ 423 static void 424 bin_clear(struct lruhash* table, struct lruhash_bin* bin) 425 { 426 struct lruhash_entry* p, *np; 427 void *d; 428 lock_quick_lock(&bin->lock); 429 p = bin->overflow_list; 430 while(p) { 431 lock_rw_wrlock(&p->lock); 432 np = p->overflow_next; 433 d = p->data; 434 if(table->markdelfunc) 435 (*table->markdelfunc)(p->key); 436 lock_rw_unlock(&p->lock); 437 (*table->delkeyfunc)(p->key, table->cb_arg); 438 (*table->deldatafunc)(d, table->cb_arg); 439 p = np; 440 } 441 bin->overflow_list = NULL; 442 lock_quick_unlock(&bin->lock); 443 } 444 445 void 446 lruhash_clear(struct lruhash* table) 447 { 448 size_t i; 449 if(!table) 450 return; 451 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); 452 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); 453 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); 454 455 lock_quick_lock(&table->lock); 456 for(i=0; i<table->size; i++) { 457 bin_clear(table, &table->array[i]); 458 } 459 table->lru_start = NULL; 460 table->lru_end = NULL; 461 table->num = 0; 462 table->space_used = 0; 463 lock_quick_unlock(&table->lock); 464 } 465 466 void 467 lruhash_status(struct lruhash* table, const char* id, int extended) 468 { 469 lock_quick_lock(&table->lock); 470 log_info("%s: %u entries, memory %u / %u", 471 id, (unsigned)table->num, (unsigned)table->space_used, 472 (unsigned)table->space_max); 473 log_info(" itemsize %u, array %u, mask %d", 474 (unsigned)(table->num? table->space_used/table->num : 0), 475 (unsigned)table->size, table->size_mask); 476 if(extended) { 477 size_t i; 478 int min=(int)table->size*2, max=-2; 479 for(i=0; i<table->size; i++) { 480 int here = 0; 481 struct lruhash_entry *en; 482 lock_quick_lock(&table->array[i].lock); 483 en = table->array[i].overflow_list; 484 while(en) { 485 here ++; 486 en = en->overflow_next; 487 } 488 lock_quick_unlock(&table->array[i].lock); 489 if(extended >= 2) 490 log_info("bin[%d] %d", (int)i, here); 491 if(here > max) max = here; 492 if(here < min) min = here; 493 } 494 log_info(" bin min %d, avg %.2lf, max %d", min, 495 (double)table->num/(double)table->size, max); 496 } 497 lock_quick_unlock(&table->lock); 498 } 499 500 size_t 501 lruhash_get_mem(struct lruhash* table) 502 { 503 size_t s; 504 lock_quick_lock(&table->lock); 505 s = sizeof(struct lruhash) + table->space_used; 506 #ifdef USE_THREAD_DEBUG 507 if(table->size != 0) { 508 size_t i; 509 for(i=0; i<table->size; i++) 510 s += sizeof(struct lruhash_bin) + 511 lock_get_mem(&table->array[i].lock); 512 } 513 #else /* no THREAD_DEBUG */ 514 if(table->size != 0) 515 s += (table->size)*(sizeof(struct lruhash_bin) + 516 lock_get_mem(&table->array[0].lock)); 517 #endif 518 lock_quick_unlock(&table->lock); 519 s += lock_get_mem(&table->lock); 520 return s; 521 } 522 523 void 524 lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md) 525 { 526 lock_quick_lock(&table->lock); 527 table->markdelfunc = md; 528 lock_quick_unlock(&table->lock); 529 } 530 531 void 532 lruhash_traverse(struct lruhash* h, int wr, 533 void (*func)(struct lruhash_entry*, void*), void* arg) 534 { 535 size_t i; 536 struct lruhash_entry* e; 537 538 lock_quick_lock(&h->lock); 539 for(i=0; i<h->size; i++) { 540 lock_quick_lock(&h->array[i].lock); 541 for(e = h->array[i].overflow_list; e; e = e->overflow_next) { 542 if(wr) { 543 lock_rw_wrlock(&e->lock); 544 } else { 545 lock_rw_rdlock(&e->lock); 546 } 547 (*func)(e, arg); 548 lock_rw_unlock(&e->lock); 549 } 550 lock_quick_unlock(&h->array[i].lock); 551 } 552 lock_quick_unlock(&h->lock); 553 } 554 555 /* 556 * Demote: the opposite of touch, move an entry to the bottom 557 * of the LRU pile. 558 */ 559 560 void 561 lru_demote(struct lruhash* table, struct lruhash_entry* entry) 562 { 563 log_assert(table && entry); 564 if (entry == table->lru_end) 565 return; /* nothing to do */ 566 /* remove from current lru position */ 567 lru_remove(table, entry); 568 /* add at end */ 569 entry->lru_next = NULL; 570 entry->lru_prev = table->lru_end; 571 572 if (table->lru_end == NULL) 573 { 574 table->lru_start = entry; 575 } 576 else 577 { 578 table->lru_end->lru_next = entry; 579 } 580 table->lru_end = entry; 581 } 582 583 struct lruhash_entry* 584 lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash, 585 struct lruhash_entry* entry, void* data, void* cb_arg) 586 { 587 struct lruhash_bin* bin; 588 struct lruhash_entry* found, *reclaimlist = NULL; 589 size_t need_size; 590 size_t collisions; 591 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc)); 592 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc)); 593 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc)); 594 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc)); 595 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc)); 596 need_size = table->sizefunc(entry->key, data); 597 if (cb_arg == NULL) cb_arg = table->cb_arg; 598 599 /* find bin */ 600 lock_quick_lock(&table->lock); 601 bin = &table->array[hash & table->size_mask]; 602 lock_quick_lock(&bin->lock); 603 604 /* see if entry exists already */ 605 if ((found = bin_find_entry(table, bin, hash, entry->key, &collisions)) != NULL) { 606 /* if so: keep the existing data - acquire a writelock */ 607 lock_rw_wrlock(&found->lock); 608 } 609 else 610 { 611 /* if not: add to bin */ 612 entry->overflow_next = bin->overflow_list; 613 bin->overflow_list = entry; 614 lru_front(table, entry); 615 table->num++; 616 if (table->max_collisions < collisions) 617 table->max_collisions = collisions; 618 table->space_used += need_size; 619 /* return the entry that was presented, and lock it */ 620 found = entry; 621 lock_rw_wrlock(&found->lock); 622 } 623 lock_quick_unlock(&bin->lock); 624 if (table->space_used > table->space_max) 625 reclaim_space(table, &reclaimlist); 626 if (table->num >= table->size) 627 table_grow(table); 628 lock_quick_unlock(&table->lock); 629 630 /* finish reclaim if any (outside of critical region) */ 631 while (reclaimlist) { 632 struct lruhash_entry* n = reclaimlist->overflow_next; 633 void* d = reclaimlist->data; 634 (*table->delkeyfunc)(reclaimlist->key, cb_arg); 635 (*table->deldatafunc)(d, cb_arg); 636 reclaimlist = n; 637 } 638 639 /* return the entry that was selected */ 640 return found; 641 } 642 643