Lines Matching +full:key +full:- +full:value

2  * Copyright 2012-2015 Samy Al Bahra.
53 #define CK_HT_BUCKET_MASK (CK_HT_BUCKET_LENGTH - 1)
98 struct ck_ht_map *map = table->map; in ck_ht_stat()
100 st->n_entries = map->n_entries; in ck_ht_stat()
101 st->probe_maximum = map->probe_maximum; in ck_ht_stat()
108 const void *key, in ck_ht_hash() argument
112 table->h(h, key, key_length, table->seed); in ck_ht_hash()
119 uintptr_t key) in ck_ht_hash_direct() argument
122 ck_ht_hash(h, table, &key, sizeof(key)); in ck_ht_hash_direct()
128 const void *key, in ck_ht_hash_wrapper() argument
133 h->value = (unsigned long)MurmurHash64A(key, length, seed); in ck_ht_hash_wrapper()
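The wrapper above is the table's default hash callback: it runs the caller's key through MurmurHash64A and stores the 64-bit result in h->value. A stand-in with the same shape, using FNV-1a instead of Murmur (so this is only an illustration of the callback contract, not the hash ck_ht actually uses; the struct and function names here are invented for the sketch):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ht_hash { uint64_t value; };

    /* Illustration only: FNV-1a stands in for MurmurHash64A. */
    static void
    hash_wrapper(struct ht_hash *h, const void *key, size_t length, uint64_t seed)
    {
        const unsigned char *p = key;
        uint64_t v = 14695981039346656037ULL ^ seed;
        size_t i;

        for (i = 0; i < length; i++) {
            v ^= p[i];          /* mix in one byte */
            v *= 1099511628211ULL;      /* FNV prime */
        }

        h->value = v;
    }

    int
    main(void)
    {
        struct ht_hash h;

        hash_wrapper(&h, "example", 7, 0);
        printf("%llu\n", (unsigned long long)h.value);
        return 0;
    }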
150 (sizeof(struct ck_ht_entry) * n_entries + CK_MD_CACHELINE - 1); in ck_ht_map_create()
152 if (table->mode & CK_HT_WORKLOAD_DELETE) { in ck_ht_map_create()
159 map = table->m->malloc(size); in ck_ht_map_create()
163 map->mode = table->mode; in ck_ht_map_create()
164 map->size = size; in ck_ht_map_create()
165 map->probe_limit = ck_internal_max_64(n_entries >> in ck_ht_map_create()
168 map->deletions = 0; in ck_ht_map_create()
169 map->probe_maximum = 0; in ck_ht_map_create()
170 map->capacity = n_entries; in ck_ht_map_create()
171 map->step = ck_cc_ffsll(map->capacity); in ck_ht_map_create()
172 map->mask = map->capacity - 1; in ck_ht_map_create()
173 map->n_entries = 0; in ck_ht_map_create()
174 map->entries = (struct ck_ht_entry *)(((uintptr_t)&map[1] + prefix + in ck_ht_map_create()
175 CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1)); in ck_ht_map_create()
177 if (table->mode & CK_HT_WORKLOAD_DELETE) { in ck_ht_map_create()
178 map->probe_bound = (CK_HT_WORD *)&map[1]; in ck_ht_map_create()
179 memset(map->probe_bound, 0, prefix); in ck_ht_map_create()
181 map->probe_bound = NULL; in ck_ht_map_create()
184 memset(map->entries, 0, sizeof(struct ck_ht_entry) * n_entries); in ck_ht_map_create()
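ck_ht_map_create places the entry array on a cache-line boundary with the usual round-up trick, after optionally reserving a prefix for the probe-bound words. A reduced sketch of that pointer arithmetic; the constants here are placeholders, not CK's configured values:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CACHELINE 64    /* stand-in for CK_MD_CACHELINE */

    int
    main(void)
    {
        size_t prefix = 100;        /* e.g. probe-bound words */
        size_t payload = 1024;      /* entry array bytes */
        char *base = malloc(prefix + payload + CACHELINE);
        uintptr_t p;

        if (base == NULL)
            return 1;

        /* Round the first address past the prefix up to a cache-line boundary. */
        p = ((uintptr_t)base + prefix + CACHELINE - 1) & ~(uintptr_t)(CACHELINE - 1);
        printf("entries aligned at %p (allocation base %p)\n", (void *)p, (void *)base);

        free(base);
        return 0;
    }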
194 CK_HT_TYPE offset = h.value & m->mask; in ck_ht_map_bound_set()
196 if (n_probes > m->probe_maximum) in ck_ht_map_bound_set()
197 CK_HT_TYPE_STORE(&m->probe_maximum, n_probes); in ck_ht_map_bound_set()
199 if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) { in ck_ht_map_bound_set()
203 CK_HT_STORE(&m->probe_bound[offset], n_probes); in ck_ht_map_bound_set()
213 CK_HT_TYPE offset = h.value & m->mask; in ck_ht_map_bound_get()
216 if (m->probe_bound != NULL) { in ck_ht_map_bound_get()
217 r = CK_HT_LOAD(&m->probe_bound[offset]); in ck_ht_map_bound_get()
219 r = CK_HT_TYPE_LOAD(&m->probe_maximum); in ck_ht_map_bound_get()
221 r = CK_HT_TYPE_LOAD(&m->probe_maximum); in ck_ht_map_bound_get()
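With deletions enabled the map tracks a per-slot probe bound alongside the global probe_maximum; lookups consult the per-slot bound when it exists and fall back to the global maximum otherwise. A simplified, single-threaded rendering of that policy (field names shortened, atomics omitted, the saturation width is an assumption):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct map {
        uint16_t *probe_bound;  /* per-slot bound, NULL when deletes are disabled */
        uint64_t probe_maximum; /* global upper bound on probe sequence length */
        uint64_t mask;
    };

    static void
    bound_set(struct map *m, uint64_t hash, uint64_t n_probes)
    {
        uint64_t offset = hash & m->mask;

        if (n_probes > m->probe_maximum)
            m->probe_maximum = n_probes;

        if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes)
            m->probe_bound[offset] = (uint16_t)n_probes;
    }

    static uint64_t
    bound_get(const struct map *m, uint64_t hash)
    {
        uint64_t offset = hash & m->mask;
        uint64_t r;

        if (m->probe_bound != NULL) {
            r = m->probe_bound[offset];
            if (r == UINT16_MAX)    /* saturated: fall back to the global bound */
                r = m->probe_maximum;
        } else {
            r = m->probe_maximum;
        }

        return r;
    }

    int
    main(void)
    {
        struct map m = { calloc(8, sizeof(uint16_t)), 0, 7 };

        bound_set(&m, 42, 3);
        printf("%llu\n", (unsigned long long)bound_get(&m, 42));
        free(m.probe_bound);
        return 0;
    }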
231 m->free(map, map->size, defer); in ck_ht_map_destroy()
242 r.value = (h.value >> map->step) >> level; in ck_ht_map_probe_next()
243 stride = (r.value & ~CK_HT_BUCKET_MASK) << 1 in ck_ht_map_probe_next()
244 | (r.value & CK_HT_BUCKET_MASK); in ck_ht_map_probe_next()
247 (stride | CK_HT_BUCKET_LENGTH)) & map->mask; in ck_ht_map_probe_next()
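The probe sequence above derives each stride from higher hash bits, forces it to be at least one bucket (one cache line) via the OR with CK_HT_BUCKET_LENGTH, and keeps the result inside the table with the mask. A standalone re-derivation of the same arithmetic; the bucket length and step values here are assumptions, not CK's configured ones:

    #include <stdint.h>
    #include <stdio.h>

    #define BUCKET_LENGTH 4     /* entries per cache line; assumed value */
    #define BUCKET_MASK   (BUCKET_LENGTH - 1)

    /* Compute the next probe offset from the hash, table mask and probe level. */
    static uint64_t
    probe_next(uint64_t offset, uint64_t hash, unsigned int step,
        unsigned int level, uint64_t mask)
    {
        uint64_t r, stride;

        r = (hash >> step) >> level;
        stride = ((r & ~(uint64_t)BUCKET_MASK) << 1) | (r & BUCKET_MASK);

        return (offset + level + (stride | BUCKET_LENGTH)) & mask;
    }

    int
    main(void)
    {
        uint64_t hash = 0x9e3779b97f4a7c15ULL;
        uint64_t mask = 1023;       /* capacity 1024 */
        uint64_t offset = hash & mask;
        unsigned int level;

        for (level = 0; level < 4; level++) {
            offset = probe_next(offset, hash, 10, level, mask);
            printf("probe %u -> offset %llu\n", level, (unsigned long long)offset);
        }

        return 0;
    }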
259 if (m == NULL || m->malloc == NULL || m->free == NULL) in ck_ht_init()
262 table->m = m; in ck_ht_init()
263 table->mode = mode; in ck_ht_init()
264 table->seed = seed; in ck_ht_init()
267 table->h = ck_ht_hash_wrapper; in ck_ht_init()
269 table->h = h; in ck_ht_init()
272 table->map = ck_ht_map_create(table, entries); in ck_ht_init()
273 return table->map != NULL; in ck_ht_init()
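ck_ht_init records the allocator, mode, seed and hash callback (a NULL callback selects the MurmurHash64A wrapper) and allocates the initial map. A minimal usage sketch, assuming the documented ck_ht_init signature and struct ck_malloc layout; the allocator shims are illustrative, not part of CK:

    #include <ck_ht.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *
    ht_malloc(size_t size)
    {
        return malloc(size);
    }

    static void
    ht_free(void *p, size_t size, bool defer)
    {
        (void)size;
        (void)defer;
        free(p);
    }

    static struct ck_malloc allocator = {
        .malloc = ht_malloc,
        .free = ht_free
    };

    int
    main(void)
    {
        ck_ht_t table;

        /* NULL hash callback selects the built-in MurmurHash64A wrapper. */
        if (ck_ht_init(&table, CK_HT_MODE_BYTESTRING, NULL, &allocator,
            64, 42) == false) {
            fputs("ck_ht_init failed\n", stderr);
            return 1;
        }

        ck_ht_destroy(&table);
        return 0;
    }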
281 const void *key, in ck_ht_map_probe_wr() argument
298 offset = h.value & map->mask; in ck_ht_map_probe_wr()
299 for (i = 0; i < map->probe_limit; i++) { in ck_ht_map_probe_wr()
305 bucket = (void *)((uintptr_t)(map->entries + offset) & in ck_ht_map_probe_wr()
306 ~(CK_MD_CACHELINE - 1)); in ck_ht_map_probe_wr()
314 cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1)); in ck_ht_map_probe_wr()
321 if (cursor->key == CK_HT_KEY_TOMBSTONE) { in ck_ht_map_probe_wr()
330 if (cursor->key == CK_HT_KEY_EMPTY) in ck_ht_map_probe_wr()
333 if (cursor->key == (uintptr_t)key) in ck_ht_map_probe_wr()
336 if (map->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_map_probe_wr()
340 * Check memoized portion of hash value before in ck_ht_map_probe_wr()
341 * expensive full-length comparison. in ck_ht_map_probe_wr()
348 if ((cursor->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK)) in ck_ht_map_probe_wr()
351 if (cursor->hash != h.value) in ck_ht_map_probe_wr()
356 if (memcmp(pointer, key, key_length) == 0) in ck_ht_map_probe_wr()
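In bytestring mode the pointer-packed entry keeps a few high hash bits next to the value, so the probe can reject most non-matching slots by comparing those memoized bits before paying for memcmp. A reduced illustration of that packed-layout check; the bit widths are placeholders for CK_MD_VMA_BITS and CK_HT_KEY_MASK, and the entry layout is simplified:

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>
    #include <stdio.h>

    #define VMA_BITS 48                                  /* stand-in for CK_MD_VMA_BITS */
    #define KEY_MASK ((1ULL << (64 - VMA_BITS)) - 1)     /* stand-in for CK_HT_KEY_MASK */

    struct entry {
        uint64_t key;   /* key pointer (length packing omitted in this sketch) */
        uint64_t value; /* value pointer | memoized high hash bits */
    };

    static bool
    probe_match(const struct entry *cursor, uint64_t hash,
        const void *key, size_t key_length)
    {
        const void *pointer = (const void *)(uintptr_t)(cursor->key &
            ((1ULL << VMA_BITS) - 1));

        /* Cheap rejection: compare the memoized hash bits first. */
        if ((cursor->value >> VMA_BITS) != ((hash >> 32) & KEY_MASK))
            return false;

        /* Only now pay for the full-length key comparison. */
        return memcmp(pointer, key, key_length) == 0;
    }

    int
    main(void)
    {
        const char *key = "example";
        uint64_t hash = 0x1234567890abcdefULL;
        struct entry e;

        e.key = (uint64_t)(uintptr_t)key;
        e.value = ((hash >> 32) & KEY_MASK) << VMA_BITS;

        printf("%d\n", probe_match(&e, hash, key, strlen(key)));
        return 0;
    }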
386 struct ck_ht_map *map = ht->map; in ck_ht_gc()
390 if (map->n_entries == 0) { in ck_ht_gc()
391 CK_HT_TYPE_STORE(&map->probe_maximum, 0); in ck_ht_gc()
392 if (map->probe_bound != NULL) in ck_ht_gc()
393 memset(map->probe_bound, 0, sizeof(CK_HT_WORD) * map->capacity); in ck_ht_gc()
401 if (map->probe_bound != NULL) { in ck_ht_gc()
402 size = sizeof(CK_HT_WORD) * map->capacity; in ck_ht_gc()
403 bounds = ht->m->malloc(size); in ck_ht_gc()
410 maximum = map->probe_maximum; in ck_ht_gc()
413 for (i = 0; i < map->capacity; i++) { in ck_ht_gc()
419 entry = &map->entries[(i + seed) & map->mask]; in ck_ht_gc()
420 if (entry->key == CK_HT_KEY_EMPTY || in ck_ht_gc()
421 entry->key == CK_HT_KEY_TOMBSTONE) { in ck_ht_gc()
425 if (ht->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_gc()
427 h.value = entry->hash; in ck_ht_gc()
429 ht->h(&h, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry), in ck_ht_gc()
430 ht->seed); in ck_ht_gc()
438 h.value = entry->hash; in ck_ht_gc()
440 ht->h(&h, &entry->key, sizeof(entry->key), ht->seed); in ck_ht_gc()
443 (void *)entry->key, in ck_ht_gc()
444 sizeof(entry->key), in ck_ht_gc()
448 offset = h.value & map->mask; in ck_ht_gc()
451 CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); in ck_ht_gc()
454 CK_HT_TYPE_STORE(&priority->key_length, entry->key_length); in ck_ht_gc()
455 CK_HT_TYPE_STORE(&priority->hash, entry->hash); in ck_ht_gc()
457 ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value); in ck_ht_gc()
459 ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key); in ck_ht_gc()
461 CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); in ck_ht_gc()
463 ck_pr_store_ptr_unsafe(&entry->key, (void *)CK_HT_KEY_TOMBSTONE); in ck_ht_gc()
476 } else if (--cycles == 0) in ck_ht_gc()
480 if (maximum != map->probe_maximum) in ck_ht_gc()
481 CK_HT_TYPE_STORE(&map->probe_maximum, maximum); in ck_ht_gc()
484 for (i = 0; i < map->capacity; i++) in ck_ht_gc()
485 CK_HT_STORE(&map->probe_bound[i], bounds[i]); in ck_ht_gc()
487 ht->m->free(bounds, size, false); in ck_ht_gc()
497 const void *key, in ck_ht_map_probe_rd() argument
512 offset = h.value & map->mask; in ck_ht_map_probe_rd()
514 for (i = 0; i < map->probe_limit; i++) { in ck_ht_map_probe_rd()
520 bucket = (void *)((uintptr_t)(map->entries + offset) & in ck_ht_map_probe_rd()
521 ~(CK_MD_CACHELINE - 1)); in ck_ht_map_probe_rd()
529 cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1)); in ck_ht_map_probe_rd()
532 snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key); in ck_ht_map_probe_rd()
534 snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value); in ck_ht_map_probe_rd()
536 d = CK_HT_TYPE_LOAD(&map->deletions); in ck_ht_map_probe_rd()
537 snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key); in ck_ht_map_probe_rd()
539 snapshot->key_length = CK_HT_TYPE_LOAD(&cursor->key_length); in ck_ht_map_probe_rd()
540 snapshot->hash = CK_HT_TYPE_LOAD(&cursor->hash); in ck_ht_map_probe_rd()
541 snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value); in ck_ht_map_probe_rd()
549 if (snapshot->key == CK_HT_KEY_TOMBSTONE) in ck_ht_map_probe_rd()
552 if (snapshot->key == CK_HT_KEY_EMPTY) in ck_ht_map_probe_rd()
555 if (snapshot->key == (uintptr_t)key) in ck_ht_map_probe_rd()
558 if (map->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_map_probe_rd()
562 * Check memoized portion of hash value before in ck_ht_map_probe_rd()
563 * expensive full-length comparison. in ck_ht_map_probe_rd()
569 if ((snapshot->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK)) in ck_ht_map_probe_rd()
572 if (snapshot->hash != h.value) in ck_ht_map_probe_rd()
575 d_prime = CK_HT_TYPE_LOAD(&map->deletions); in ck_ht_map_probe_rd()
579 * replaced, initiate a re-probe. in ck_ht_map_probe_rd()
586 if (memcmp(pointer, key, key_length) == 0) in ck_ht_map_probe_rd()
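On platforms that cannot read the key and value as one atomic unit, the reader brackets its loads with the map's deletion counter and retries when the counter changes, so the pair it returns was never torn by a concurrent delete and reinsert. The same pattern in miniature, using C11 atomics rather than CK's ck_pr primitives:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct slot {
        _Atomic(uintptr_t) key;
        _Atomic(uintptr_t) value;
    };

    static _Atomic(uint64_t) deletions;

    /* Read a (key, value) pair, retrying if a deletion raced with the loads. */
    static void
    snapshot(struct slot *s, uintptr_t *key, uintptr_t *value)
    {
        uint64_t d, d_prime;

        do {
            d = atomic_load(&deletions);
            *key = atomic_load(&s->key);
            *value = atomic_load(&s->value);
            atomic_thread_fence(memory_order_acquire);
            d_prime = atomic_load(&deletions);
        } while (d != d_prime);
    }

    int
    main(void)
    {
        struct slot s;
        uintptr_t k, v;

        atomic_init(&s.key, (uintptr_t)0x10);
        atomic_init(&s.value, (uintptr_t)0x20);

        snapshot(&s, &k, &v);
        printf("%lu %lu\n", (unsigned long)k, (unsigned long)v);
        return 0;
    }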
603 struct ck_ht_map *map = ck_pr_load_ptr(&table->map); in ck_ht_count()
605 return CK_HT_TYPE_LOAD(&map->n_entries); in ck_ht_count()
613 struct ck_ht_map *map = table->map; in ck_ht_next()
614 uintptr_t key; in ck_ht_next() local
616 if (i->offset >= map->capacity) in ck_ht_next()
620 key = map->entries[i->offset].key; in ck_ht_next()
621 if (key != CK_HT_KEY_EMPTY && key != CK_HT_KEY_TOMBSTONE) in ck_ht_next()
623 } while (++i->offset < map->capacity); in ck_ht_next()
625 if (i->offset >= map->capacity) in ck_ht_next()
628 *entry = map->entries + i->offset++; in ck_ht_next()
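ck_ht_next walks the current map's entry array, skipping empty and tombstoned slots, and hands back a pointer to each live entry in turn. A usage sketch assuming the documented iterator API (ck_ht_iterator_t, CK_HT_ITERATOR_INITIALIZER, ck_ht_next) and the bytestring-mode entry accessors; it assumes an already-initialized table and is only meaningful under the table's single-writer model:

    #include <ck_ht.h>
    #include <stdio.h>

    /* Walk every live entry in the table (sketch, assumes documented API). */
    static void
    dump_table(ck_ht_t *table)
    {
        ck_ht_iterator_t iterator = CK_HT_ITERATOR_INITIALIZER;
        ck_ht_entry_t *entry;

        while (ck_ht_next(table, &iterator, &entry) == true) {
            printf("key=%.*s value=%p\n",
                (int)ck_ht_entry_key_length(entry),
                (const char *)ck_ht_entry_key(entry),
                ck_ht_entry_value(entry));
        }
    }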
637 map = table->map; in ck_ht_reset_size_spmc()
642 ck_pr_store_ptr_unsafe(&table->map, update); in ck_ht_reset_size_spmc()
643 ck_ht_map_destroy(table->m, map, true); in ck_ht_reset_size_spmc()
650 struct ck_ht_map *map = table->map; in ck_ht_reset_spmc()
652 return ck_ht_reset_size_spmc(table, map->capacity); in ck_ht_reset_spmc()
665 map = table->map; in ck_ht_grow_spmc()
667 if (map->capacity >= capacity) in ck_ht_grow_spmc()
674 for (k = 0; k < map->capacity; k++) { in ck_ht_grow_spmc()
675 previous = &map->entries[k]; in ck_ht_grow_spmc()
677 if (previous->key == CK_HT_KEY_EMPTY || previous->key == CK_HT_KEY_TOMBSTONE) in ck_ht_grow_spmc()
680 if (table->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_grow_spmc()
682 void *key; in ck_ht_grow_spmc() local
685 key = ck_ht_entry_key(previous); in ck_ht_grow_spmc()
690 h.value = previous->hash; in ck_ht_grow_spmc()
692 table->h(&h, key, key_length, table->seed); in ck_ht_grow_spmc()
696 h.value = previous->hash; in ck_ht_grow_spmc()
698 table->h(&h, &previous->key, sizeof(previous->key), table->seed); in ck_ht_grow_spmc()
702 offset = h.value & update->mask; in ck_ht_grow_spmc()
705 for (i = 0; i < update->probe_limit; i++) { in ck_ht_grow_spmc()
706 bucket = (void *)((uintptr_t)(update->entries + offset) & ~(CK_MD_CACHELINE - 1)); in ck_ht_grow_spmc()
709 struct ck_ht_entry *cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1)); in ck_ht_grow_spmc()
712 if (CK_CC_LIKELY(cursor->key == CK_HT_KEY_EMPTY)) { in ck_ht_grow_spmc()
714 update->n_entries++; in ck_ht_grow_spmc()
726 if (i == update->probe_limit) { in ck_ht_grow_spmc()
731 ck_ht_map_destroy(table->m, update, false); in ck_ht_grow_spmc()
738 ck_pr_store_ptr_unsafe(&table->map, update); in ck_ht_grow_spmc()
739 ck_ht_map_destroy(table->m, map, true); in ck_ht_grow_spmc()
751 map = table->map; in ck_ht_remove_spmc()
753 if (table->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_remove_spmc()
759 (void *)entry->key, in ck_ht_remove_spmc()
760 sizeof(entry->key)); in ck_ht_remove_spmc()
764 if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY) in ck_ht_remove_spmc()
769 ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE); in ck_ht_remove_spmc()
771 CK_HT_TYPE_STORE(&map->n_entries, map->n_entries - 1); in ck_ht_remove_spmc()
785 map = ck_pr_load_ptr(&table->map); in ck_ht_get_spmc()
788 * Platforms that cannot read key and key_length atomically must reprobe in ck_ht_get_spmc()
791 d = CK_HT_TYPE_LOAD(&map->deletions); in ck_ht_get_spmc()
793 if (table->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_get_spmc()
798 (void *)entry->key, sizeof(entry->key)); in ck_ht_get_spmc()
801 d_prime = CK_HT_TYPE_LOAD(&map->deletions); in ck_ht_get_spmc()
811 if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY) in ck_ht_get_spmc()
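A lookup through ck_ht_get_spmc fills a caller-provided entry: hash the key, stage it in the entry, and on success read the value back out. A usage sketch assuming the documented bytestring-mode helpers (ck_ht_hash, ck_ht_entry_key_set, ck_ht_entry_value); the table is presumed already initialized and populated:

    #include <ck_ht.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Look up a NUL-terminated string key (sketch, assumes documented API). */
    static void *
    lookup(ck_ht_t *table, const char *key)
    {
        ck_ht_entry_t entry;
        ck_ht_hash_t h;
        uint16_t length = (uint16_t)strlen(key);

        ck_ht_hash(&h, table, key, length);
        ck_ht_entry_key_set(&entry, key, length);

        if (ck_ht_get_spmc(table, h, &entry) == false)
            return NULL;

        return ck_ht_entry_value(&entry);
    }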
829 map = table->map; in ck_ht_set_spmc()
831 if (table->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_set_spmc()
838 (void *)entry->key, in ck_ht_set_spmc()
839 sizeof(entry->key), in ck_ht_set_spmc()
851 if (ck_ht_grow_spmc(table, map->capacity << 1) == false) in ck_ht_set_spmc()
860 if (candidate->key != CK_HT_KEY_EMPTY && in ck_ht_set_spmc()
864 * We avoid a state of (K, B) (where [K, B] -> [K', B]) by in ck_ht_set_spmc()
872 CK_HT_TYPE_STORE(&priority->key_length, entry->key_length); in ck_ht_set_spmc()
873 CK_HT_TYPE_STORE(&priority->hash, entry->hash); in ck_ht_set_spmc()
878 * observe re-use. If they observe re-use, it is at most in ck_ht_set_spmc()
881 if (priority->value == CK_HT_KEY_TOMBSTONE) { in ck_ht_set_spmc()
882 CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); in ck_ht_set_spmc()
886 ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value); in ck_ht_set_spmc()
888 ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key); in ck_ht_set_spmc()
895 CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); in ck_ht_set_spmc()
898 ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE); in ck_ht_set_spmc()
907 bool replace = candidate->key != CK_HT_KEY_EMPTY && in ck_ht_set_spmc()
908 candidate->key != CK_HT_KEY_TOMBSTONE; in ck_ht_set_spmc()
911 if (priority->key == CK_HT_KEY_TOMBSTONE) { in ck_ht_set_spmc()
912 CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); in ck_ht_set_spmc()
921 ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); in ck_ht_set_spmc()
923 ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); in ck_ht_set_spmc()
925 CK_HT_TYPE_STORE(&candidate->key_length, entry->key_length); in ck_ht_set_spmc()
926 CK_HT_TYPE_STORE(&candidate->hash, entry->hash); in ck_ht_set_spmc()
927 ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); in ck_ht_set_spmc()
929 ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); in ck_ht_set_spmc()
937 CK_HT_TYPE_STORE(&map->n_entries, map->n_entries + 1); in ck_ht_set_spmc()
943 if (map->n_entries * 2 > map->capacity) in ck_ht_set_spmc()
944 ck_ht_grow_spmc(table, map->capacity << 1); in ck_ht_set_spmc()
947 entry->key = CK_HT_KEY_EMPTY; in ck_ht_set_spmc()
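ck_ht_set_spmc has replace semantics: on return the caller's entry holds the displaced key/value pair when an existing key was overwritten, and is left empty when the insertion landed in a fresh slot. A usage sketch under the same assumptions as the lookup example above, additionally assuming the documented ck_ht_entry_set and ck_ht_entry_empty helpers:

    #include <ck_ht.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Insert or replace; returns the previous value, or NULL if the key was new
     * or the table could not be grown (sketch, assumes documented API). */
    static void *
    store(ck_ht_t *table, const char *key, void *value)
    {
        ck_ht_entry_t entry;
        ck_ht_hash_t h;
        uint16_t length = (uint16_t)strlen(key);

        ck_ht_hash(&h, table, key, length);
        ck_ht_entry_set(&entry, h, key, length, value);

        if (ck_ht_set_spmc(table, h, &entry) == false)
            return NULL;    /* table could not be grown */

        if (ck_ht_entry_empty(&entry) == true)
            return NULL;    /* no previous binding */

        return ck_ht_entry_value(&entry);
    }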
965 map = table->map; in ck_ht_put_spmc()
967 if (table->mode & CK_HT_MODE_BYTESTRING) { in ck_ht_put_spmc()
974 (void *)entry->key, in ck_ht_put_spmc()
975 sizeof(entry->key), in ck_ht_put_spmc()
982 if (ck_ht_grow_spmc(table, map->capacity << 1) == false) in ck_ht_put_spmc()
987 /* Version counter is updated before re-use. */ in ck_ht_put_spmc()
988 CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1); in ck_ht_put_spmc()
991 /* Re-use tombstone if one was found. */ in ck_ht_put_spmc()
994 } else if (candidate->key != CK_HT_KEY_EMPTY && in ck_ht_put_spmc()
995 candidate->key != CK_HT_KEY_TOMBSTONE) { in ck_ht_put_spmc()
997 * If the snapshot key is non-empty and the value field is not in ck_ht_put_spmc()
998 * a tombstone then an identical key was found. As store does in ck_ht_put_spmc()
1007 ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); in ck_ht_put_spmc()
1009 ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); in ck_ht_put_spmc()
1011 CK_HT_TYPE_STORE(&candidate->key_length, entry->key_length); in ck_ht_put_spmc()
1012 CK_HT_TYPE_STORE(&candidate->hash, entry->hash); in ck_ht_put_spmc()
1013 ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value); in ck_ht_put_spmc()
1015 ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key); in ck_ht_put_spmc()
1018 CK_HT_TYPE_STORE(&map->n_entries, map->n_entries + 1); in ck_ht_put_spmc()
1021 if (map->n_entries * 2 > map->capacity) in ck_ht_put_spmc()
1022 ck_ht_grow_spmc(table, map->capacity << 1); in ck_ht_put_spmc()
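ck_ht_put_spmc differs from set only in that it refuses to overwrite: it reuses a tombstone or claims an empty slot, and returns false when a live entry with the same key already exists or the table could not grow. A short sketch contrasting it with the store() helper above, under the same API assumptions:

    #include <ck_ht.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Insert only if absent; returns false if the key is already present
     * or the table could not be grown (sketch, assumes documented API). */
    static bool
    store_if_absent(ck_ht_t *table, const char *key, void *value)
    {
        ck_ht_entry_t entry;
        ck_ht_hash_t h;
        uint16_t length = (uint16_t)strlen(key);

        ck_ht_hash(&h, table, key, length);
        ck_ht_entry_set(&entry, h, key, length, value);

        return ck_ht_put_spmc(table, h, &entry);
    }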
1031 ck_ht_map_destroy(table->m, table->map, false); in ck_ht_destroy()