/*
 * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 *
 *
 *
 * Notes on hash table design and layout
 * This hashtable uses a hopscotch algorithm to do indexing. The data structure
 * looks as follows:
 *
 * hash          +--------------+
 * value+------->+ HT_VALUE     |
 *      +        +--------------+
 * +-------+
 * |       |
 * +---------------------------------------------------------+
 * |       |       |       |       |                         |
 * | entry | entry | entry | entry |                         |
 * |       |       |       |       |                         |
 * +---------------------------------------------------------+
 * |                               |                         |
 * |                               |                         |
 * +---------------------------------------------------------+
 * |       +                       +                         |
 * |  neighborhood[0]        neighborhood[1]                 |
 * |                                                         |
 * |                                                         |
 * +---------------------------------------------------------+
 *                             |
 *                             +
 *                       neighborhoods
 *
 * On lookup/insert/delete, the item's key is hashed to a 64 bit value
 * and the result is masked to provide an index into the neighborhoods
 * table. Once a neighborhood is determined, an in-order search is done
 * of the entries in that neighborhood for a matching hash
 * value; if found, the corresponding HT_VALUE is used for the respective
 * operation. The number of entries in a neighborhood is determined at build
 * time based on the cacheline size of the target CPU. The intent is for
 * all entries in a neighborhood to fit into a single cache line to speed
 * up lookups. If all entries in a neighborhood are in use at the
 * time of an insert, the table is expanded and rehashed.
 *
 * A lockless-reads hash table is based on the same design but does not
 * allow growing or deletion. Thus subsequent neighborhoods are always
 * searched for a match until an empty entry is found.
 */
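
/*
 * A minimal usage sketch (my_data is a hypothetical payload; real callers
 * normally build typed keys and values with the helper macros in
 * internal/hashtable.h rather than filling HT_KEY/HT_VALUE directly):
 *
 *     HT_CONFIG conf = { 0 };  // NULL hash/free fns fall back to defaults
 *     HT_KEY key = { 0 };
 *     HT_VALUE val = { 0 }, *old = NULL, *found;
 *     HT *h;
 *
 *     conf.init_neighborhoods = 128;  // rounded up to a power of 2
 *     h = ossl_ht_new(&conf);
 *
 *     key.keybuf = (uint8_t *)"mykey";
 *     key.keysize = 5;
 *     val.value = my_data;  // must be non-NULL
 *
 *     ossl_ht_write_lock(h);
 *     ossl_ht_insert(h, &key, &val, &old);  // olddata == NULL forbids replace
 *     ossl_ht_write_unlock(h);
 *
 *     ossl_ht_read_lock(h);
 *     found = ossl_ht_get(h, &key);
 *     ossl_ht_read_unlock(h);
 *
 *     ossl_ht_free(h);
 */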

#include <string.h>
#include <internal/rcu.h>
#include <internal/hashtable.h>
#include <internal/hashfunc.h>
#include <openssl/rand.h>

/*
 * gcc defines __SANITIZE_THREAD__
 * but clang uses the feature attributes api
 * map the latter to the former
 */
#if defined(__clang__) && defined(__has_feature)
# if __has_feature(thread_sanitizer)
#  define __SANITIZE_THREADS__
# endif
#endif

#ifdef __SANITIZE_THREADS__
# include <sanitizer/tsan_interface.h>
#endif

#include "internal/numbers.h"
/*
 * When we do a lookup/insert/delete, there is a high likelihood
 * that we will iterate over at least part of the neighborhood list.
 * As such, because we design a neighborhood to fit into a single
 * cache line, it is advantageous, when supported, to fetch the entire
 * structure for faster lookups
 */
#if defined(__GNUC__) || defined(__clang__)
# define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
# define PREFETCH(x) __builtin_prefetch(x)
#else
# define PREFETCH_NEIGHBORHOOD(x)
# define PREFETCH(x)
#endif

/*
 * Define our neighborhood list length
 * Note: It should always be a power of 2
 */
#define DEFAULT_NEIGH_LEN_LOG 4
#define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)

/*
 * For now assume cache line size is 64 bytes
 */
#define CACHE_LINE_BYTES 64
#define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES

#define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
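
/*
 * e.g. with 64 byte cache lines and 16 byte entries (a 64 bit hash plus a
 * pointer on typical LP64 targets) this works out to 4 entries per
 * neighborhood
 */
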
/*
 * Defines our value wrapper, which pairs each stored
 * value with the table that owns it
 */
struct ht_internal_value_st {
    HT_VALUE value;
    HT *ht;
};

struct ht_neighborhood_entry_st {
    uint64_t hash;
    struct ht_internal_value_st *value;
};

struct ht_neighborhood_st {
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};

/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    struct ht_neighborhood_st *neighborhoods;
    void *neighborhood_ptr_to_free;
    uint64_t neighborhood_mask;
};

/*
 * Private data may be updated on the write
 * side only, and so does not require rcu sync
 */
struct ht_write_private_data_st {
    size_t neighborhood_len;
    size_t value_count;
    int need_sync;
};

struct ht_internal_st {
    HT_CONFIG config;
    CRYPTO_RCU_LOCK *lock;
    CRYPTO_RWLOCK *atomic_lock;
    struct ht_mutable_data_st *md;
    struct ht_write_private_data_st wpd;
};

static void free_value(struct ht_internal_value_st *v);

static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
                                                              void **freeptr)
{
    struct ht_neighborhood_st *ret;

    ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
                                CACHE_LINE_BYTES, freeptr);

    /* fall back to regular malloc */
    if (ret == NULL) {
        ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
        if (ret == NULL)
            return NULL;
    }
    memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
    return ret;
}

static void internal_free_nop(HT_VALUE *v)
{
    return;
}

HT *ossl_ht_new(const HT_CONFIG *conf)
{
    HT *new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->atomic_lock = CRYPTO_THREAD_lock_new();
    if (new->atomic_lock == NULL)
        goto err;

    memcpy(&new->config, conf, sizeof(*conf));

    if (new->config.init_neighborhoods != 0) {
        new->wpd.neighborhood_len = new->config.init_neighborhoods;
        /* round up to the next power of 2 */
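        /*
         * e.g. a requested 1000 decrements to 999, the shifts below
         * propagate the top set bit into every lower bit (giving 1023),
         * and the final increment yields 1024
         */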
        new->wpd.neighborhood_len--;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
        new->wpd.neighborhood_len++;
    } else {
        new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
    }

    if (new->config.ht_free_fn == NULL)
        new->config.ht_free_fn = internal_free_nop;

    new->md = OPENSSL_zalloc(sizeof(*new->md));
    if (new->md == NULL)
        goto err;

    new->md->neighborhoods =
        alloc_new_neighborhood_list(new->wpd.neighborhood_len,
                                    &new->md->neighborhood_ptr_to_free);
    if (new->md->neighborhoods == NULL)
        goto err;
    new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;

    new->lock = ossl_rcu_lock_new(1, conf->ctx);
    if (new->lock == NULL)
        goto err;

    if (new->config.ht_hash_fn == NULL)
        new->config.ht_hash_fn = ossl_fnv1a_hash;

    return new;

err:
    CRYPTO_THREAD_lock_free(new->atomic_lock);
    ossl_rcu_lock_free(new->lock);
    if (new->md != NULL)
        OPENSSL_free(new->md->neighborhood_ptr_to_free);
    OPENSSL_free(new->md);
    OPENSSL_free(new);
    return NULL;
}

void ossl_ht_read_lock(HT *htable)
{
    ossl_rcu_read_lock(htable->lock);
}

void ossl_ht_read_unlock(HT *htable)
{
    ossl_rcu_read_unlock(htable->lock);
}

void ossl_ht_write_lock(HT *htable)
{
    ossl_rcu_write_lock(htable->lock);
    htable->wpd.need_sync = 0;
}

void ossl_ht_write_unlock(HT *htable)
{
    int need_sync = htable->wpd.need_sync;

    htable->wpd.need_sync = 0;
    ossl_rcu_write_unlock(htable->lock);
    if (need_sync)
        ossl_synchronize_rcu(htable->lock);
}
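
/*
 * Note: write-side operations that defer a free with ossl_rcu_call (replace,
 * delete, flush and grow) set wpd.need_sync, so the unlock above follows up
 * with ossl_synchronize_rcu to allow those deferred callbacks to run once
 * all readers have moved past the old data
 */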

static void free_oldmd(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;
    size_t i, j;
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
    struct ht_internal_value_st *v;

    for (i = 0; i < neighborhood_len; i++) {
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
                v = oldmd->neighborhoods[i].entries[j].value;
                v->ht->config.ht_free_fn((HT_VALUE *)v);
                free_value(v);
            }
        }
    }

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}

static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }

    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}

int ossl_ht_flush(HT *h)
{
    return ossl_ht_flush_internal(h);
}

void ossl_ht_free(HT *h)
{
    if (h == NULL)
        return;

    ossl_ht_write_lock(h);
    ossl_ht_flush_internal(h);
    ossl_ht_write_unlock(h);
    /* Freeing the lock does a final sync for us */
    CRYPTO_THREAD_lock_free(h->atomic_lock);
    ossl_rcu_lock_free(h->lock);
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
    OPENSSL_free(h->md);
    OPENSSL_free(h);
    return;
}

size_t ossl_ht_count(HT *h)
{
    size_t count;

    count = h->wpd.value_count;
    return count;
}

void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
                           void *arg)
{
    size_t i, j;
    struct ht_mutable_data_st *md;

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (md->neighborhoods[i].entries[j].value != NULL) {
                if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
                    goto out;
            }
        }
    }
out:
    return;
}

HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
                              int (*filter)(HT_VALUE *obj, void *arg),
                              void *arg)
{
    struct ht_mutable_data_st *md;
    HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
                                         + (sizeof(HT_VALUE *) * max_len));
    size_t i, j;
    struct ht_internal_value_st *v;

    if (list == NULL)
        return NULL;

    /*
     * The list array lives just beyond the end of
     * the struct
     */
    list->list = (HT_VALUE **)(list + 1);

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            v = md->neighborhoods[i].entries[j].value;
            if (v != NULL && filter((HT_VALUE *)v, arg)) {
                list->list[list->list_len++] = (HT_VALUE *)v;
                if (list->list_len == max_len)
                    goto out;
            }
        }
    }
out:
    return list;
}

void ossl_ht_value_list_free(HT_VALUE_LIST *list)
{
    OPENSSL_free(list);
}

static int compare_hash(uint64_t hash1, uint64_t hash2)
{
    return (hash1 == hash2);
}

static void free_old_neigh_table(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;

    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}

/*
 * Increase the size of the hash table bucket list.
 * Must be called with the write lock held
 */
static int grow_hashtable(HT *h, size_t oldsize)
{
    struct ht_mutable_data_st *newmd;
    struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
    int rc = 0;
    uint64_t oldi, oldj, newi, newj;
    uint64_t oldhash;
    struct ht_internal_value_st *oldv;
    int rehashed;
    size_t newsize = oldsize * 2;

    if (h->config.lockless_reads)
        goto out;

    if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL)
        goto out;

    /* bucket list is always a power of 2 */
    newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL)
        goto out_free;

    /* being a power of 2 makes for easy mask computation */
    newmd->neighborhood_mask = (newsize - 1);

    /*
     * Now we need to start rehashing entries
     * Note we don't need to use atomics here as the new
     * mutable data hasn't been published
     */
    for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
        for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
            oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
            if (oldv == NULL)
                continue;
            oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
            newi = oldhash & newmd->neighborhood_mask;
            rehashed = 0;
            for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
                if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
                    newmd->neighborhoods[newi].entries[newj].value = oldv;
                    newmd->neighborhoods[newi].entries[newj].hash = oldhash;
                    rehashed = 1;
                    break;
                }
            }
            if (rehashed == 0) {
                /* we ran out of space in a neighborhood, grow again */
                OPENSSL_free(newmd->neighborhood_ptr_to_free);
                OPENSSL_free(newmd);
                return grow_hashtable(h, newsize);
            }
        }
    }
    /*
     * Now that our entries are all hashed into the new bucket list,
     * update our neighborhood length
     */
    h->wpd.neighborhood_len = newsize;

    /*
     * Now we replace the old mutable data with the new
     */
    ossl_rcu_assign_ptr(&h->md, &newmd);
    ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
    h->wpd.need_sync = 1;
    /*
     * And we're done
     */
    rc = 1;

out:
    return rc;
out_free:
    OPENSSL_free(newmd->neighborhood_ptr_to_free);
    OPENSSL_free(newmd);
    goto out;
}

static void free_old_ht_value(void *arg)
{
    HT_VALUE *h = (HT_VALUE *)arg;

    /*
     * Note, this is only called on replacement,
     * the caller is responsible for freeing the
     * held data, we just need to free the wrapping
     * struct here
     */
    OPENSSL_free(h);
}

static ossl_inline int match_key(HT_KEY *a, HT_KEY *b)
{
    /*
     * Keys match if they are both present, the same size
     * and compare equal in memory.
     * If either key is absent (as when collision checking is
     * disabled and no key copy is stored), we fall through and
     * trust the 64 bit hash comparison already done by the caller
     */
    PREFETCH(a->keybuf);
    PREFETCH(b->keybuf);
    if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize)
        return !memcmp(a->keybuf, b->keybuf, a->keysize);

    return 1;
}

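/*
 * Attempt to insert newval into the neighborhood selected by hash.
 * Returns 1 on success, 0 on failure (an attempted duplicate insert when
 * olddata is NULL, or a failed atomic operation) and -1 if the target
 * neighborhood is full, in which case the caller is expected to grow the
 * table and retry
 */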
static int ossl_ht_insert_locked(HT *h, uint64_t hash,
                                 struct ht_internal_value_st *newval,
                                 HT_VALUE **olddata)
{
    struct ht_mutable_data_st *md = h->md;
    uint64_t neigh_idx_start = hash & md->neighborhood_mask;
    uint64_t neigh_idx = neigh_idx_start;
    size_t j;
    uint64_t ihash;
    HT_VALUE *ival;
    size_t empty_idx = SIZE_MAX;
    int lockless_reads = h->config.lockless_reads;

    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);

        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                empty_idx = j;
                /* lockless_reads implies no deletion, we can break out */
                if (lockless_reads)
                    goto not_found;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ihash, h->atomic_lock))
                return 0;
            if (compare_hash(hash, ihash) && match_key(&newval->value.key,
                                                       &ival->key)) {
                if (olddata == NULL) {
                    /* This would insert a duplicate -> fail */
                    return 0;
                }
                /* Do a replacement */
                if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
                                         hash, h->atomic_lock))
                    return 0;
                *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
                ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
                                    &newval);
                ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
                h->wpd.need_sync = 1;
                return 1;
            }
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

not_found:
    /* If we get to here, it's just an insert */
    if (empty_idx == SIZE_MAX)
        return -1; /* out of space */
    if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
                             hash, h->atomic_lock))
        return 0;
    h->wpd.value_count++;
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
                        &newval);
    return 1;
}

static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
                                                    void *data,
                                                    uintptr_t *type)
{
    struct ht_internal_value_st *tmp;
    size_t nvsize = sizeof(*tmp);

    if (h->config.collision_check == 1)
        nvsize += key->keysize;

    tmp = OPENSSL_malloc(nvsize);

    if (tmp == NULL)
        return NULL;

    tmp->ht = h;
    tmp->value.value = data;
    tmp->value.type_id = type;
    tmp->value.key.keybuf = NULL;
    if (h->config.collision_check) {
        tmp->value.key.keybuf = (uint8_t *)(tmp + 1);
        tmp->value.key.keysize = key->keysize;
        memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize);
    }

    return tmp;
}

static void free_value(struct ht_internal_value_st *v)
{
    OPENSSL_free(v);
}

int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
{
    struct ht_internal_value_st *newval = NULL;
    uint64_t hash;
    int rc = 0;
    int i;

    if (data->value == NULL)
        goto out;

    newval = alloc_new_value(h, key, data->value, data->type_id);
    if (newval == NULL)
        goto out;

    /*
     * The caller is expected to hold the write lock here to
     * prevent other changes to the bucket list
     */
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    for (i = 0;
         (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1
         && i < 4;
         ++i)
        if (!grow_hashtable(h, h->wpd.neighborhood_len)) {
            rc = -1;
            break;
        }

    if (rc <= 0)
        free_value(newval);

out:
    return rc;
}

HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
{
    struct ht_mutable_data_st *md;
    uint64_t hash;
    uint64_t neigh_idx_start;
    uint64_t neigh_idx;
    struct ht_internal_value_st *ival = NULL;
    size_t j;
    uint64_t ehash;
    int lockless_reads = h->config.lockless_reads;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    md = ossl_rcu_deref(&h->md);
    neigh_idx = neigh_idx_start = hash & md->neighborhood_mask;
    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                if (lockless_reads)
                    /* lockless_reads implies no deletion, we can break out */
                    return NULL;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ehash, h->atomic_lock))
                return NULL;
            if (compare_hash(hash, ehash) && match_key(&ival->value.key, key))
                return (HT_VALUE *)ival;
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

    return NULL;
}

static void free_old_entry(void *arg)
{
    struct ht_internal_value_st *v = arg;

    v->ht->config.ht_free_fn((HT_VALUE *)v);
    free_value(v);
}

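/*
 * Note: deletion is not supported on lockless-read tables, as readers may
 * still hold references to the entry and the probe sequence relies on
 * empty slots never reappearing, hence the early return below
 */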
int ossl_ht_delete(HT *h, HT_KEY *key)
{
    uint64_t hash;
    uint64_t neigh_idx;
    size_t j;
    struct ht_internal_value_st *v = NULL;
    HT_VALUE *nv = NULL;
    int rc = 0;

    if (h->config.lockless_reads)
        return 0;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    neigh_idx = hash & h->md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
        if (v == NULL)
            continue;
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)
            && match_key(key, &v->value.key)) {
            if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
                                     0, h->atomic_lock))
                break;
            h->wpd.value_count--;
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
                                &nv);
            rc = 1;
            break;
        }
    }
    if (rc == 1) {
        ossl_rcu_call(h->lock, free_old_entry, v);
        h->wpd.need_sync = 1;
    }
    return rc;
}