/*
 *******************************************************************************
 * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
 * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
 * functions are employed.  The original cuckoo hashing algorithm was described
 * in:
 *
 *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing.  Journal of Algorithms
 *     51(2):122-144.
 *
 * Generalization of cuckoo hashing was discussed in:
 *
 *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
 *     alternative to traditional hash tables.  In Proceedings of the 7th
 *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
 *     January 2006.
 *
 * This implementation uses precisely two hash functions because that is the
 * fewest that can work, and supporting multiple hashes is an implementation
 * burden.  Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
 * that shows approximate expected maximum load factors for various
 * configurations:
 *
 *           |         #cells/bucket         |
 *   #hashes |   1   |   2   |   4   |   8   |
 *   --------+-------+-------+-------+-------+
 *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
 *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
 *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
 *         4 | 0.97  | 0.99  | 0.999 |       |
 *
 * The number of cells per bucket is chosen such that a bucket fits in one cache
 * line.  So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
 * respectively.
 *
 ******************************************************************************/
#define	JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	ckh_grow(ckh_t *ckh);
static void	ckh_shrink(ckh_t *ckh);

/******************************************************************************/

/*
 * Search bucket for key and return the cell number if found; SIZE_T_MAX
 * otherwise.
 */
JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
	ckhc_t *cell;
	unsigned i;

	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
		if (cell->key != NULL && ckh->keycomp(key, cell->key))
			return ((bucket << LG_CKH_BUCKET_CELLS) + i);
	}

	return (SIZE_T_MAX);
}

/*
 * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
 */
JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
	size_t hashes[2], bucket, cell;

	assert(ckh != NULL);

	ckh->hash(key, hashes);

	/* Search primary bucket. */
	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	cell = ckh_bucket_search(ckh, bucket, key);
	if (cell != SIZE_T_MAX)
		return (cell);

	/* Search secondary bucket. */
	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	cell = ckh_bucket_search(ckh, bucket, key);
	return (cell);
}
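
/*
 * Illustrative sketch of the index arithmetic used above; the concrete values
 * are assumptions chosen for the example, not taken from any particular
 * build.  With LG_CKH_BUCKET_CELLS == 2 (4 cells per bucket, the 64-bit
 * configuration) and lg_curbuckets == 10, a key's candidate buckets and the
 * cells they cover are computed as:
 *
 *	size_t hashes[2], bucket, first_cell;
 *
 *	ckh->hash(key, hashes);
 *	bucket = hashes[0] & ((ZU(1) << 10) - 1);	(range 0 .. 1023)
 *	first_cell = bucket << 2;			(bucket occupies cells
 *							 first_cell .. first_cell + 3)
 *
 * For example, bucket 3 maps to cells 12..15 of ckh->tab, so a bucket's cells
 * are contiguous and, given the CACHELINE-aligned allocations elsewhere in
 * this file, share a cache line.
 */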

JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
    const void *data)
{
	ckhc_t *cell;
	unsigned offset, i;

	/*
	 * Cycle through the cells in the bucket, starting at a random position.
	 * The randomness avoids worst-case search overhead as buckets fill up.
	 */
	prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
		if (cell->key == NULL) {
			cell->key = key;
			cell->data = data;
			ckh->count++;
			return (false);
		}
	}

	return (true);
}

/*
 * No space is available in bucket.  Randomly evict an item, then try to find an
 * alternate location for that item.  Iteratively repeat this
 * eviction/relocation procedure until either success or detection of an
 * eviction/relocation bucket cycle.
 */
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
    void const **argdata)
{
	const void *key, *data, *tkey, *tdata;
	ckhc_t *cell;
	size_t hashes[2], bucket, tbucket;
	unsigned i;

	bucket = argbucket;
	key = *argkey;
	data = *argdata;
	while (true) {
		/*
		 * Choose a random item within the bucket to evict.  This is
		 * critical to correct function, because without (eventually)
		 * evicting all items within a bucket during iteration, it
		 * would be possible to get stuck in an infinite loop if there
		 * were an item for which both hashes indicated the same
		 * bucket.
		 */
		prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
		assert(cell->key != NULL);

		/* Swap cell->{key,data} and {key,data} (evict). */
		tkey = cell->key; tdata = cell->data;
		cell->key = key; cell->data = data;
		key = tkey; data = tdata;

#ifdef CKH_COUNT
		ckh->nrelocs++;
#endif

		/* Find the alternate bucket for the evicted item. */
		ckh->hash(key, hashes);
		tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
		if (tbucket == bucket) {
			tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
			    - 1);
			/*
			 * It may be that (tbucket == bucket) still, if the
			 * item's hashes both indicate this bucket.  However,
			 * we are guaranteed to eventually escape this bucket
			 * during iteration, assuming pseudo-random item
			 * selection (true randomness would make infinite
			 * looping a remote possibility).  The reason we can
			 * never get trapped forever is that there are two
			 * cases:
			 *
			 * 1) This bucket == argbucket, so we will quickly
			 *    detect an eviction cycle and terminate.
			 * 2) An item was evicted to this bucket from another,
			 *    which means that at least one item in this bucket
			 *    has hashes that indicate distinct buckets.
			 */
		}
		/* Check for a cycle. */
		if (tbucket == argbucket) {
			*argkey = key;
			*argdata = data;
			return (true);
		}

		bucket = tbucket;
		if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
			return (false);
	}
}
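
/*
 * Worked example of the eviction chain above (the bucket numbers are
 * hypothetical and chosen only for illustration).  Suppose a new key K maps
 * to buckets 5 and 9, both of which are full.  ckh_try_insert() below ends up
 * calling:
 *
 *	ckh_evict_reloc_insert(ckh, 9, &key, &data);
 *
 * which evicts a randomly chosen victim V from bucket 9, stores K in its
 * place, and then tries to insert V into V's other bucket, say 3.  If bucket 3
 * has a free cell the chain terminates successfully (false is returned).  If
 * the chain ever routes back to bucket 9, a cycle is assumed, the currently
 * homeless key/data pair is handed back through *argkey/*argdata, and true is
 * returned so that ckh_insert() can grow the table and retry.
 */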

JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata)
{
	size_t hashes[2], bucket;
	const void *key = *argkey;
	const void *data = *argdata;

	ckh->hash(key, hashes);

	/* Try to insert in primary bucket. */
	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
		return (false);

	/* Try to insert in secondary bucket. */
	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
		return (false);

	/*
	 * Try to find a place for this item via iterative eviction/relocation.
	 */
	return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}

/*
 * Try to rebuild the hash table from scratch by inserting all items from the
 * old table into the new.
 */
JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
	size_t count, i, nins;
	const void *key, *data;

	count = ckh->count;
	ckh->count = 0;
	for (i = nins = 0; nins < count; i++) {
		if (aTab[i].key != NULL) {
			key = aTab[i].key;
			data = aTab[i].data;
			if (ckh_try_insert(ckh, &key, &data)) {
				ckh->count = count;
				return (true);
			}
			nins++;
		}
	}

	return (false);
}

static bool
ckh_grow(ckh_t *ckh)
{
	bool ret;
	ckhc_t *tab, *ttab;
	size_t lg_curcells;
	unsigned lg_prevbuckets;

#ifdef CKH_COUNT
	ckh->ngrows++;
#endif

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table will have to be doubled more than once in order to create a
	 * usable table.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
	while (true) {
		size_t usize;

		lg_curcells++;
		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
		if (usize == 0) {
			ret = true;
			goto label_return;
		}
		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
		if (tab == NULL) {
			ret = true;
			goto label_return;
		}
		/* Swap in new table. */
		ttab = ckh->tab;
		ckh->tab = tab;
		tab = ttab;
		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

		if (ckh_rebuild(ckh, tab) == false) {
			idalloc(tab);
			break;
		}

		/* Rebuilding failed, so back out partially rebuilt table. */
		idalloc(ckh->tab);
		ckh->tab = tab;
		ckh->lg_curbuckets = lg_prevbuckets;
	}

	ret = false;
label_return:
	return (ret);
}
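
/*
 * Sizing sketch for ckh_grow()/ckh_shrink().  The numbers assume a 64-bit
 * system with LG_CKH_BUCKET_CELLS == 2 and sizeof(ckhc_t) == 16 (one key
 * pointer plus one data pointer per cell); actual values depend on the build:
 *
 *	lg_curbuckets == 8		-->  1 << (8 + 2) == 1024 cells  (16 KiB)
 *	one successful ckh_grow()	-->  2048 cells                  (32 KiB)
 *	one successful ckh_shrink()	-->  back to 1024 cells
 *
 * In both directions the new table size is requested via
 * sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE) and allocated with CACHELINE
 * alignment, so each bucket's cells stay within one cache line.
 */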

static void
ckh_shrink(ckh_t *ckh)
{
	ckhc_t *tab, *ttab;
	size_t lg_curcells, usize;
	unsigned lg_prevbuckets;

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table rebuild will fail.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
	if (usize == 0)
		return;
	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
	if (tab == NULL) {
		/*
		 * An OOM error isn't worth propagating, since it doesn't
		 * prevent this or future operations from proceeding.
		 */
		return;
	}
	/* Swap in new table. */
	ttab = ckh->tab;
	ckh->tab = tab;
	tab = ttab;
	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

	if (ckh_rebuild(ckh, tab) == false) {
		idalloc(tab);
#ifdef CKH_COUNT
		ckh->nshrinks++;
#endif
		return;
	}

	/* Rebuilding failed, so back out partially rebuilt table. */
	idalloc(ckh->tab);
	ckh->tab = tab;
	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
	ckh->nshrinkfails++;
#endif
}

bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
	bool ret;
	size_t mincells, usize;
	unsigned lg_mincells;

	assert(minitems > 0);
	assert(hash != NULL);
	assert(keycomp != NULL);

#ifdef CKH_COUNT
	ckh->ngrows = 0;
	ckh->nshrinks = 0;
	ckh->nshrinkfails = 0;
	ckh->ninserts = 0;
	ckh->nrelocs = 0;
#endif
	ckh->prng_state = 42; /* Value doesn't really matter. */
	ckh->count = 0;

	/*
	 * Find the minimum power of 2 that is large enough to fit minitems
	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
	 * factor that will typically allow minitems items to fit without ever
	 * growing the table.
	 */
	assert(LG_CKH_BUCKET_CELLS > 0);
	mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
	for (lg_mincells = LG_CKH_BUCKET_CELLS;
	    (ZU(1) << lg_mincells) < mincells;
	    lg_mincells++)
		; /* Do nothing. */
	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
	ckh->hash = hash;
	ckh->keycomp = keycomp;

	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
	if (usize == 0) {
		ret = true;
		goto label_return;
	}
	ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
	if (ckh->tab == NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return (ret);
}

void
ckh_delete(ckh_t *ckh)
{

	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows,
	    (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloc(ckh->tab);
	if (config_debug)
		memset(ckh, 0x5a, sizeof(ckh_t));
}

size_t
ckh_count(ckh_t *ckh)
{

	assert(ckh != NULL);

	return (ckh->count);
}

bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
	size_t i, ncells;

	for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
	    LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
		if (ckh->tab[i].key != NULL) {
			if (key != NULL)
				*key = (void *)ckh->tab[i].key;
			if (data != NULL)
				*data = (void *)ckh->tab[i].data;
			*tabind = i + 1;
			return (false);
		}
	}

	return (true);
}

bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
	bool ret;

	assert(ckh != NULL);
	assert(ckh_search(ckh, key, NULL, NULL));

#ifdef CKH_COUNT
	ckh->ninserts++;
#endif

	while (ckh_try_insert(ckh, &key, &data)) {
		if (ckh_grow(ckh)) {
			ret = true;
			goto label_return;
		}
	}

	ret = false;
label_return:
	return (ret);
}
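
/*
 * Illustrative usage sketch of the public interface (not part of the
 * implementation; error handling and the jemalloc-internal context are
 * elided).  Every entry point returns false on success/hit and true on
 * failure/miss:
 *
 *	ckh_t ckh;
 *	void *data;
 *
 *	if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
 *		(allocation failed)
 *	if (ckh_insert(&ckh, "answer", "42"))
 *		(insertion failed; the table could not grow)
 *	if (ckh_search(&ckh, "answer", NULL, &data) == false)
 *		(data now points to "42")
 *	ckh_delete(&ckh);
 *
 * Note that ckh_insert() asserts that the key is not already present, so a
 * caller that may insert duplicates must ckh_search() (or ckh_remove()) first.
 */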

bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
	size_t cell;

	assert(ckh != NULL);

	cell = ckh_isearch(ckh, searchkey);
	if (cell != SIZE_T_MAX) {
		if (key != NULL)
			*key = (void *)ckh->tab[cell].key;
		if (data != NULL)
			*data = (void *)ckh->tab[cell].data;
		ckh->tab[cell].key = NULL;
		ckh->tab[cell].data = NULL; /* Not necessary. */

		ckh->count--;
		/* Try to halve the table if it is less than 1/4 full. */
		if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
		    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
		    > ckh->lg_minbuckets) {
			/* Ignore error due to OOM. */
			ckh_shrink(ckh);
		}

		return (false);
	}

	return (true);
}

bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
	size_t cell;

	assert(ckh != NULL);

	cell = ckh_isearch(ckh, searchkey);
	if (cell != SIZE_T_MAX) {
		if (key != NULL)
			*key = (void *)ckh->tab[cell].key;
		if (data != NULL)
			*data = (void *)ckh->tab[cell].data;
		return (false);
	}

	return (true);
}

void
ckh_string_hash(const void *key, size_t r_hash[2])
{

	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}

bool
ckh_string_keycomp(const void *k1, const void *k2)
{

	assert(k1 != NULL);
	assert(k2 != NULL);

	return (strcmp((char *)k1, (char *)k2) ? false : true);
}

void
ckh_pointer_hash(const void *key, size_t r_hash[2])
{
	union {
		const void	*v;
		size_t		i;
	} u;

	assert(sizeof(u.v) == sizeof(u.i));
	u.v = key;
	hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}

bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{

	return ((k1 == k2) ? true : false);
}
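
/*
 * Illustrative iteration sketch (hypothetical caller code).  ckh_iter()
 * returns items in internal table order, not insertion order: it starts
 * scanning at cell *tabind (zero for a full traversal), advances *tabind past
 * each item it returns, and returns true once the table has been exhausted:
 *
 *	size_t tabind = 0;
 *	void *key, *data;
 *
 *	while (ckh_iter(&ckh, &tabind, &key, &data) == false) {
 *		(visit (key, data) here)
 *	}
 */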