/*
 * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains a hashtable with LRU keeping of entries.
 *
 */

#include "config.h"
#include "util/storage/lruhash.h"
#include "util/fptr_wlist.h"

void
bin_init(struct lruhash_bin* array, size_t size)
{
	size_t i;
#ifdef THREADS_DISABLED
	(void)array;
#endif
	for(i=0; i<size; i++) {
		lock_quick_init(&array[i].lock);
		lock_protect(&array[i].lock, &array[i],
			sizeof(struct lruhash_bin));
	}
}

struct lruhash*
lruhash_create(size_t start_size, size_t maxmem,
	lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
	lruhash_delkeyfunc_type delkeyfunc,
	lruhash_deldatafunc_type deldatafunc, void* arg)
{
	struct lruhash* table = (struct lruhash*)calloc(1,
		sizeof(struct lruhash));
	if(!table)
		return NULL;
	lock_quick_init(&table->lock);
	table->sizefunc = sizefunc;
	table->compfunc = compfunc;
	table->delkeyfunc = delkeyfunc;
	table->deldatafunc = deldatafunc;
	table->cb_arg = arg;
	table->size = start_size;
	table->size_mask = (int)(start_size-1);
	table->lru_start = NULL;
	table->lru_end = NULL;
	table->num = 0;
	table->space_used = 0;
	table->space_max = maxmem;
	table->max_collisions = 0;
	table->array = calloc(table->size, sizeof(struct lruhash_bin));
	if(!table->array) {
		lock_quick_destroy(&table->lock);
		free(table);
		return NULL;
	}
	bin_init(table->array, table->size);
	lock_protect(&table->lock, table, sizeof(*table));
	lock_protect(&table->lock, table->array,
		table->size*sizeof(struct lruhash_bin));
	return table;
}
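/*
 * A minimal usage sketch (illustration only, not part of this file;
 * the my_* callbacks are hypothetical). The callbacks tell the table
 * how to size, compare and destroy the opaque key and data pointers:
 *
 *	static size_t my_sizefunc(void* key, void* data)
 *	{ (void)key; (void)data;
 *	  return sizeof(struct mykey) + sizeof(struct mydata); }
 *	static int my_compfunc(void* k1, void* k2)
 *	{ return memcmp(k1, k2, sizeof(struct mykey)); }
 *	static void my_delkey(void* key, void* arg)
 *	{ (void)arg; free(key); }
 *	static void my_deldata(void* data, void* arg)
 *	{ (void)arg; free(data); }
 *
 *	table = lruhash_create(1024, 1024*1024, my_sizefunc,
 *		my_compfunc, my_delkey, my_deldata, NULL);
 *
 * Note that start_size must be a power of 2: the bin for a hash is
 * picked with hash & size_mask, and size_mask is start_size-1.
 */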
void
bin_delete(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* p, *np;
	void *d;
	if(!bin)
		return;
	lock_quick_destroy(&bin->lock);
	p = bin->overflow_list;
	bin->overflow_list = NULL;
	while(p) {
		np = p->overflow_next;
		d = p->data;
		(*table->delkeyfunc)(p->key, table->cb_arg);
		(*table->deldatafunc)(d, table->cb_arg);
		p = np;
	}
}

void
bin_split(struct lruhash* table, struct lruhash_bin* newa,
	int newmask)
{
	size_t i;
	struct lruhash_entry *p, *np;
	struct lruhash_bin* newbin;
	/* move entries to the new table. Since hash x is mapped to
	 * bin x & mask, and the new mask uses one more bit, all the
	 * entries in one bin go into the old bin i or bin i | newbit */
#ifndef THREADS_DISABLED
	int newbit = newmask - table->size_mask;
#endif
	/* so, really, this task could also be threaded, per bin. */
	/* LRU list is not changed */
	for(i=0; i<table->size; i++)
	{
		lock_quick_lock(&table->array[i].lock);
		p = table->array[i].overflow_list;
		/* lock both destination bins */
		lock_quick_lock(&newa[i].lock);
		lock_quick_lock(&newa[newbit|i].lock);
		while(p) {
			np = p->overflow_next;
			/* link into correct new bin */
			newbin = &newa[p->hash & newmask];
			p->overflow_next = newbin->overflow_list;
			newbin->overflow_list = p;
			p=np;
		}
		lock_quick_unlock(&newa[i].lock);
		lock_quick_unlock(&newa[newbit|i].lock);
		lock_quick_unlock(&table->array[i].lock);
	}
}
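/*
 * To illustrate the split arithmetic with example numbers (a sketch,
 * not code from this file): growing from 8 to 16 bins gives old mask
 * 0x7, new mask 0xf and newbit = 0xf - 0x7 = 0x8. An entry with hash
 * 0x1d sat in bin 0x1d & 0x7 = 5; after the split it lands in bin
 * 0x1d & 0xf = 13 = 5 | newbit. So bin i splits into exactly the two
 * bins i and i|newbit and no other bin receives its entries, which is
 * why locking only those two destination bins suffices.
 */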
void
lruhash_delete(struct lruhash* table)
{
	size_t i;
	if(!table)
		return;
	/* delete lock on hashtable to force check that it is not held */
	lock_quick_destroy(&table->lock);
	for(i=0; i<table->size; i++)
		bin_delete(table, &table->array[i]);
	free(table->array);
	free(table);
}

void
bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
{
	struct lruhash_entry* p = bin->overflow_list;
	struct lruhash_entry** prevp = &bin->overflow_list;
	while(p) {
		if(p == entry) {
			*prevp = p->overflow_next;
			return;
		}
		prevp = &p->overflow_next;
		p = p->overflow_next;
	}
}

void
reclaim_space(struct lruhash* table, struct lruhash_entry** list)
{
	struct lruhash_entry* d;
	struct lruhash_bin* bin;
	log_assert(table);
	/* does not delete MRU entry, so table will not be empty. */
	while(table->num > 1 && table->space_used > table->space_max) {
		/* Since we hold the hashtable lock, nobody can change
		 * the lru chain, so it cannot be deleted underneath us.
		 * We still need the hashbin and entry write locks to
		 * flush all users away from the entry. Users are
		 * unlikely: since it is the LRU end, a reader that got
		 * a rdlock would have moved it to the front, but lock
		 * to be sure. */
		d = table->lru_end;
		/* specialised delete from the end of the doubly linked
		 * list; we know num>1, so there is a previous lru entry. */
		log_assert(d && d->lru_prev);
		table->lru_end = d->lru_prev;
		d->lru_prev->lru_next = NULL;
		/* schedule entry for deletion */
		bin = &table->array[d->hash & table->size_mask];
		table->num --;
		lock_quick_lock(&bin->lock);
		bin_overflow_remove(bin, d);
		d->overflow_next = *list;
		*list = d;
		lock_rw_wrlock(&d->lock);
		table->space_used -= table->sizefunc(d->key, d->data);
		if(table->markdelfunc)
			(*table->markdelfunc)(d->key);
		lock_rw_unlock(&d->lock);
		lock_quick_unlock(&bin->lock);
	}
}

struct lruhash_entry*
bin_find_entry(struct lruhash* table,
	struct lruhash_bin* bin, hashvalue_type hash, void* key,
	size_t* collisions)
{
	size_t c = 0;
	struct lruhash_entry* p = bin->overflow_list;
	while(p) {
		if(p->hash == hash && table->compfunc(p->key, key) == 0)
			break;
		c++;
		p = p->overflow_next;
	}
	if(collisions != NULL)
		*collisions = c;
	return p;
}

void
table_grow(struct lruhash* table)
{
	struct lruhash_bin* newa;
	int newmask;
	size_t i;
	if(table->size_mask == (int)(((size_t)-1)>>1)) {
		log_err("hash array malloc: size_t too small");
		return;
	}
	/* try to allocate the new array; on failure keep the old one */
	newa = calloc(table->size*2, sizeof(struct lruhash_bin));
	if(!newa) {
		log_err("hash grow: malloc failed");
		/* continue with the smaller array, though it is slower */
		return;
	}
	bin_init(newa, table->size*2);
	newmask = (table->size_mask << 1) | 1;
	bin_split(table, newa, newmask);
	/* delete the old bins */
	lock_unprotect(&table->lock, table->array);
	for(i=0; i<table->size; i++) {
		lock_quick_destroy(&table->array[i].lock);
	}
	free(table->array);

	table->size *= 2;
	table->size_mask = newmask;
	table->array = newa;
	lock_protect(&table->lock, table->array,
		table->size*sizeof(struct lruhash_bin));
	return;
}
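/*
 * Growth policy, illustrated with example numbers (a sketch, not code
 * from this file): lruhash_insert calls table_grow when num >= size,
 * so a table created with 1024 bins doubles to 2048 bins as the
 * 1024th entry goes in, and newmask becomes (0x3ff << 1) | 1 = 0x7ff.
 * The average overflow chain thus stays around one entry per bin.
 */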
void
lru_front(struct lruhash* table, struct lruhash_entry* entry)
{
	entry->lru_prev = NULL;
	entry->lru_next = table->lru_start;
	if(!table->lru_start)
		table->lru_end = entry;
	else	table->lru_start->lru_prev = entry;
	table->lru_start = entry;
}

void
lru_remove(struct lruhash* table, struct lruhash_entry* entry)
{
	if(entry->lru_prev)
		entry->lru_prev->lru_next = entry->lru_next;
	else	table->lru_start = entry->lru_next;
	if(entry->lru_next)
		entry->lru_next->lru_prev = entry->lru_prev;
	else	table->lru_end = entry->lru_prev;
}

void
lru_touch(struct lruhash* table, struct lruhash_entry* entry)
{
	log_assert(table && entry);
	if(entry == table->lru_start)
		return; /* nothing to do */
	/* remove from current lru position */
	lru_remove(table, entry);
	/* add at front */
	lru_front(table, entry);
}

void
lruhash_insert(struct lruhash* table, hashvalue_type hash,
	struct lruhash_entry* entry, void* data, void* cb_arg)
{
	struct lruhash_bin* bin;
	struct lruhash_entry* found, *reclaimlist=NULL;
	size_t need_size;
	size_t collisions;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	need_size = table->sizefunc(entry->key, data);
	if(cb_arg == NULL) cb_arg = table->cb_arg;

	/* find bin */
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);

	/* see if entry exists already */
	if(!(found=bin_find_entry(table, bin, hash, entry->key,
		&collisions))) {
		/* if not: add to bin */
		entry->overflow_next = bin->overflow_list;
		bin->overflow_list = entry;
		lru_front(table, entry);
		table->num++;
		if(table->max_collisions < collisions)
			table->max_collisions = collisions;
		table->space_used += need_size;
	} else {
		/* if so: update data - needs a writelock */
		table->space_used += need_size -
			(*table->sizefunc)(found->key, found->data);
		(*table->delkeyfunc)(entry->key, cb_arg);
		lru_touch(table, found);
		lock_rw_wrlock(&found->lock);
		(*table->deldatafunc)(found->data, cb_arg);
		found->data = data;
		lock_rw_unlock(&found->lock);
	}
	lock_quick_unlock(&bin->lock);
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	if(table->num >= table->size)
		table_grow(table);
	lock_quick_unlock(&table->lock);

	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}
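/*
 * Caller-side sketch of the insert contract (illustration only, not
 * code from this file): the caller fills in the entry's hash and key
 * and gives up ownership of key and data. If the key already existed,
 * the presented key is freed here via delkeyfunc and the existing
 * entry keeps its own key with the new data:
 *
 *	entry->hash = h;
 *	entry->key = key;
 *	lruhash_insert(table, h, entry, data, NULL);
 *	... do not use entry, key or data afterwards ...
 */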
struct lruhash_entry*
lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
{
	struct lruhash_entry* entry;
	struct lruhash_bin* bin;
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));

	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	if((entry=bin_find_entry(table, bin, hash, key, NULL)))
		lru_touch(table, entry);
	lock_quick_unlock(&table->lock);

	if(entry) {
		if(wr)	{ lock_rw_wrlock(&entry->lock); }
		else	{ lock_rw_rdlock(&entry->lock); }
	}
	lock_quick_unlock(&bin->lock);
	return entry;
}

void
lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
{
	struct lruhash_entry* entry;
	struct lruhash_bin* bin;
	void *d;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));

	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	if((entry=bin_find_entry(table, bin, hash, key, NULL))) {
		bin_overflow_remove(bin, entry);
		lru_remove(table, entry);
	} else {
		lock_quick_unlock(&table->lock);
		lock_quick_unlock(&bin->lock);
		return;
	}
	table->num--;
	table->space_used -= (*table->sizefunc)(entry->key, entry->data);
	lock_rw_wrlock(&entry->lock);
	if(table->markdelfunc)
		(*table->markdelfunc)(entry->key);
	lock_rw_unlock(&entry->lock);
	lock_quick_unlock(&bin->lock);
	lock_quick_unlock(&table->lock);
	/* finish removal */
	d = entry->data;
	(*table->delkeyfunc)(entry->key, table->cb_arg);
	(*table->deldatafunc)(d, table->cb_arg);
}

/** clear bin, respecting locks, does not do space, LRU */
static void
bin_clear(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* p, *np;
	void *d;
	lock_quick_lock(&bin->lock);
	p = bin->overflow_list;
	while(p) {
		lock_rw_wrlock(&p->lock);
		np = p->overflow_next;
		d = p->data;
		if(table->markdelfunc)
			(*table->markdelfunc)(p->key);
		lock_rw_unlock(&p->lock);
		(*table->delkeyfunc)(p->key, table->cb_arg);
		(*table->deldatafunc)(d, table->cb_arg);
		p = np;
	}
	bin->overflow_list = NULL;
	lock_quick_unlock(&bin->lock);
}

void
lruhash_clear(struct lruhash* table)
{
	size_t i;
	if(!table)
		return;
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));

	lock_quick_lock(&table->lock);
	for(i=0; i<table->size; i++) {
		bin_clear(table, &table->array[i]);
	}
	table->lru_start = NULL;
	table->lru_end = NULL;
	table->num = 0;
	table->space_used = 0;
	lock_quick_unlock(&table->lock);
}

void
lruhash_status(struct lruhash* table, const char* id, int extended)
{
	lock_quick_lock(&table->lock);
	log_info("%s: %u entries, memory %u / %u",
		id, (unsigned)table->num, (unsigned)table->space_used,
		(unsigned)table->space_max);
	log_info("  itemsize %u, array %u, mask %d",
		(unsigned)(table->num? table->space_used/table->num : 0),
		(unsigned)table->size, table->size_mask);
	if(extended) {
		size_t i;
		int min=(int)table->size*2, max=-2;
		for(i=0; i<table->size; i++) {
			int here = 0;
			struct lruhash_entry *en;
			lock_quick_lock(&table->array[i].lock);
			en = table->array[i].overflow_list;
			while(en) {
				here ++;
				en = en->overflow_next;
			}
			lock_quick_unlock(&table->array[i].lock);
			if(extended >= 2)
				log_info("bin[%d] %d", (int)i, here);
			if(here > max) max = here;
			if(here < min) min = here;
		}
		log_info("  bin min %d, avg %.2lf, max %d", min,
			(double)table->num/(double)table->size, max);
	}
	lock_quick_unlock(&table->lock);
}
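/*
 * Caller-side sketch for lookup (illustration only, not code from
 * this file): the entry is returned locked, read-locked when wr is 0,
 * and the caller must unlock it when done with the data:
 *
 *	struct lruhash_entry* e = lruhash_lookup(table, h, key, 0);
 *	if(e) {
 *		... read e->data ...
 *		lock_rw_unlock(&e->lock);
 *	}
 */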
size_t
lruhash_get_mem(struct lruhash* table)
{
	size_t s;
	lock_quick_lock(&table->lock);
	s = sizeof(struct lruhash) + table->space_used;
#ifdef USE_THREAD_DEBUG
	if(table->size != 0) {
		size_t i;
		for(i=0; i<table->size; i++)
			s += sizeof(struct lruhash_bin) +
				lock_get_mem(&table->array[i].lock);
	}
#else /* no THREAD_DEBUG */
	if(table->size != 0)
		s += (table->size)*(sizeof(struct lruhash_bin) +
			lock_get_mem(&table->array[0].lock));
#endif
	lock_quick_unlock(&table->lock);
	s += lock_get_mem(&table->lock);
	return s;
}

void
lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
{
	lock_quick_lock(&table->lock);
	table->markdelfunc = md;
	lock_quick_unlock(&table->lock);
}

void
lruhash_update_space_used(struct lruhash* table, void* cb_arg, int diff_size)
{
	struct lruhash_entry *reclaimlist = NULL;

	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));

	if(cb_arg == NULL) cb_arg = table->cb_arg;

	/* update space used */
	lock_quick_lock(&table->lock);

	if((int)table->space_used + diff_size < 0)
		table->space_used = 0;
	else	table->space_used = (size_t)((int)table->space_used + diff_size);

	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);

	lock_quick_unlock(&table->lock);

	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}

void lruhash_update_space_max(struct lruhash* table, void* cb_arg, size_t max)
{
	struct lruhash_entry *reclaimlist = NULL;

	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));

	if(cb_arg == NULL) cb_arg = table->cb_arg;

	/* update space max */
	lock_quick_lock(&table->lock);
	table->space_max = max;

	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);

	lock_quick_unlock(&table->lock);

	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}
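/*
 * A sketch of adjusting the accounting after an entry's data changes
 * size in place (illustration only; old_sz and new_sz are
 * hypothetical): diff_size may be negative, and reclaim runs if the
 * new total exceeds space_max.
 *
 *	old_sz = table->sizefunc(e->key, e->data);
 *	... modify e->data under its write lock ...
 *	new_sz = table->sizefunc(e->key, e->data);
 *	lruhash_update_space_used(table, NULL, (int)new_sz - (int)old_sz);
 */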
void
lruhash_traverse(struct lruhash* h, int wr,
	void (*func)(struct lruhash_entry*, void*), void* arg)
{
	size_t i;
	struct lruhash_entry* e;

	lock_quick_lock(&h->lock);
	for(i=0; i<h->size; i++) {
		lock_quick_lock(&h->array[i].lock);
		for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
			if(wr) {
				lock_rw_wrlock(&e->lock);
			} else {
				lock_rw_rdlock(&e->lock);
			}
			(*func)(e, arg);
			lock_rw_unlock(&e->lock);
		}
		lock_quick_unlock(&h->array[i].lock);
	}
	lock_quick_unlock(&h->lock);
}

/*
 * Demote: the opposite of touch, move an entry to the bottom
 * of the LRU pile.
 */
void
lru_demote(struct lruhash* table, struct lruhash_entry* entry)
{
	log_assert(table && entry);
	if(entry == table->lru_end)
		return; /* nothing to do */
	/* remove from current lru position */
	lru_remove(table, entry);
	/* add at end */
	entry->lru_next = NULL;
	entry->lru_prev = table->lru_end;
	if(table->lru_end == NULL)
		table->lru_start = entry;
	else	table->lru_end->lru_next = entry;
	table->lru_end = entry;
}

struct lruhash_entry*
lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash,
	struct lruhash_entry* entry, void* data, void* cb_arg)
{
	struct lruhash_bin* bin;
	struct lruhash_entry* found, *reclaimlist = NULL;
	size_t need_size;
	size_t collisions;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	need_size = table->sizefunc(entry->key, data);
	if(cb_arg == NULL) cb_arg = table->cb_arg;

	/* find bin */
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);

	/* see if entry exists already */
	if((found = bin_find_entry(table, bin, hash, entry->key,
		&collisions)) != NULL) {
		/* if so: keep the existing data - acquire a writelock */
		lock_rw_wrlock(&found->lock);
	} else {
		/* if not: add to bin */
		entry->overflow_next = bin->overflow_list;
		bin->overflow_list = entry;
		lru_front(table, entry);
		table->num++;
		if(table->max_collisions < collisions)
			table->max_collisions = collisions;
		table->space_used += need_size;
		/* return the entry that was presented, and lock it */
		found = entry;
		lock_rw_wrlock(&found->lock);
	}
	lock_quick_unlock(&bin->lock);
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	if(table->num >= table->size)
		table_grow(table);
	lock_quick_unlock(&table->lock);

	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}

	/* return the entry that was selected */
	return found;
}
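/*
 * Caller-side sketch for lruhash_insert_or_retrieve (illustration
 * only, not code from this file): the returned entry is write-locked.
 * If an entry with the same key already existed, the presented entry
 * was not inserted and the caller still owns it and its data:
 *
 *	e = lruhash_insert_or_retrieve(table, h, my_entry, data, NULL);
 *	if(e != my_entry) {
 *		... free my_entry and data, use e instead ...
 *	}
 *	... use e->data ...
 *	lock_rw_unlock(&e->lock);
 */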