/* xref: /freebsd/sys/contrib/ck/src/ck_hs.c (revision 72df847a94bccee245a3316e4f848482b9ac2ac2) */
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_cc.h>
#include <ck_hs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_string.h>

#include "ck_internal.h"

#ifndef CK_HS_PROBE_L1_SHIFT
#define CK_HS_PROBE_L1_SHIFT 3ULL
#endif /* CK_HS_PROBE_L1_SHIFT */

#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)

#ifndef CK_HS_PROBE_L1_DEFAULT
#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif

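/*
 * When pointer packing (CK_HS_PP) is in use, the bits of a pointer above
 * CK_MD_VMA_BITS are assumed unused by the architecture, so a slice of the
 * key's hash can be cached there (see ck_hs_marshal). CK_HS_VMA() strips
 * those cached hash bits to recover the original pointer.
 */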
#define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
#define CK_HS_VMA(x)	\
	((void *)((uintptr_t)(x) & CK_HS_VMA_MASK))

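/*
 * Empty slots are NULL and deleted slots carry an all-ones tombstone
 * pointer. CK_HS_G generation counters, selected by the low bits of a
 * key's hash, let writers signal concurrent readers that a probe
 * sequence may have been perturbed and must be retried.
 */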
#define CK_HS_EMPTY     NULL
#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
#define CK_HS_G		(2)
#define CK_HS_G_MASK	(CK_HS_G - 1)

#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
#define CK_HS_WORD          uint8_t
#define CK_HS_WORD_MAX	    UINT8_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_8(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_8(x)
#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
#define CK_HS_WORD          uint16_t
#define CK_HS_WORD_MAX	    UINT16_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_16(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_16(x)
#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
#define CK_HS_WORD          uint32_t
#define CK_HS_WORD_MAX	    UINT32_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_32(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_32(x)
#else
#error "ck_hs is not supported on your platform."
#endif

enum ck_hs_probe_behavior {
	CK_HS_PROBE = 0,	/* Default behavior. */
	CK_HS_PROBE_TOMBSTONE,	/* Short-circuit on tombstone. */
	CK_HS_PROBE_INSERT	/* Short-circuit on probe bound if tombstone found. */
};

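/*
 * A map is the backing array of a hash set at a given capacity. The
 * probe_bound array (present only in CK_HS_MODE_DELETE) tracks a
 * per-bucket upper bound on probe sequence length so that lookups may
 * terminate early; probe_maximum is the global upper bound.
 */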
struct ck_hs_map {
	unsigned int generation[CK_HS_G];
	unsigned int probe_maximum;
	unsigned long mask;
	unsigned long step;
	unsigned int probe_limit;
	unsigned int tombstones;
	unsigned long n_entries;
	unsigned long capacity;
	unsigned long size;
	CK_HS_WORD *probe_bound;
	const void **entries;
};

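/*
 * Bump the generation counter associated with hash h. Readers snapshot
 * this counter around a probe (see ck_hs_get) and retry if it changed,
 * which prevents them from missing an entry that a writer is relocating
 * to an earlier slot in its probe sequence.
 */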
static inline void
ck_hs_map_signal(struct ck_hs_map *map, unsigned long h)
{

	h &= CK_HS_G_MASK;
	ck_pr_store_uint(&map->generation[h],
	    map->generation[h] + 1);
	ck_pr_fence_store();
	return;
}

static bool
_ck_hs_next(struct ck_hs *hs, struct ck_hs_map *map,
    struct ck_hs_iterator *i, void **key)
{
	void *value;

	if (i->offset >= map->capacity)
		return false;

	do {
		value = CK_CC_DECONST_PTR(map->entries[i->offset]);
		if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT)
				value = CK_HS_VMA(value);
#else
			(void)hs; /* Avoid unused parameter warning. */
#endif
			i->offset++;
			*key = value;
			return true;
		}
	} while (++i->offset < map->capacity);

	return false;
}

void
ck_hs_iterator_init(struct ck_hs_iterator *iterator)
{

	iterator->cursor = NULL;
	iterator->offset = 0;
	iterator->map = NULL;
	return;
}

bool
ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{

	return _ck_hs_next(hs, hs->map, i, key);
}

bool
ck_hs_next_spmc(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{
	struct ck_hs_map *m = i->map;

	if (m == NULL) {
		m = i->map = ck_pr_load_ptr(&hs->map);
	}

	return _ck_hs_next(hs, m, i, key);
}

void
ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
{
	struct ck_hs_map *map = hs->map;

	st->n_entries = map->n_entries;
	st->tombstones = map->tombstones;
	st->probe_maximum = map->probe_maximum;
	return;
}

unsigned long
ck_hs_count(struct ck_hs *hs)
{

	return hs->map->n_entries;
}

static void
ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
{

	m->free(map, map->size, defer);
	return;
}

void
ck_hs_destroy(struct ck_hs *hs)
{

	ck_hs_map_destroy(hs->m, hs->map, false);
	return;
}

static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
	struct ck_hs_map *map;
	unsigned long size, n_entries, prefix, limit;

	n_entries = ck_internal_power_2(entries);
	if (n_entries < CK_HS_PROBE_L1)
		n_entries = CK_HS_PROBE_L1;

	size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);

	if (hs->mode & CK_HS_MODE_DELETE) {
		prefix = sizeof(CK_HS_WORD) * n_entries;
		size += prefix;
	} else {
		prefix = 0;
	}

	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;

	map->size = size;

	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_cc_ffsl(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/* Align the entries array to a cache line boundary. */
	map->entries = (void *)(((uintptr_t)&map[1] + prefix +
	    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));

	memset(map->entries, 0, sizeof(void *) * n_entries);
	memset(map->generation, 0, sizeof map->generation);

	if (hs->mode & CK_HS_MODE_DELETE) {
		map->probe_bound = (CK_HS_WORD *)&map[1];
		memset(map->probe_bound, 0, prefix);
	} else {
		map->probe_bound = NULL;
	}

	/* Commit the zeroing of entries with respect to map publication. */
	ck_pr_fence_store();
	return map;
}

bool
ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
{
	struct ck_hs_map *map, *previous;

	previous = hs->map;
	map = ck_hs_map_create(hs, capacity);
	if (map == NULL)
		return false;

	ck_pr_store_ptr(&hs->map, map);
	ck_hs_map_destroy(hs->m, previous, true);
	return true;
}

bool
ck_hs_reset(struct ck_hs *hs)
{
	struct ck_hs_map *previous;

	previous = hs->map;
	return ck_hs_reset_size(hs, previous->capacity);
}

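/*
 * Compute the next probe offset once a cache line has been exhausted.
 * Callers scan CK_HS_PROBE_L1 adjacent slots within a cache line first;
 * the stride for the next jump is then derived from hash bits beyond
 * those used for the initial bucket, so keys that collide on a bucket
 * tend to follow diverging probe sequences.
 */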
static inline unsigned long
ck_hs_map_probe_next(struct ck_hs_map *map,
    unsigned long offset,
    unsigned long h,
    unsigned long level,
    unsigned long probes)
{
	unsigned long r, stride;

	r = (h >> map->step) >> level;
	stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK);

	return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) +
	    (stride | CK_HS_PROBE_L1)) & map->mask;
}

static inline void
ck_hs_map_bound_set(struct ck_hs_map *m,
    unsigned long h,
    unsigned long n_probes)
{
	unsigned long offset = h & m->mask;

	if (n_probes > m->probe_maximum)
		ck_pr_store_uint(&m->probe_maximum, n_probes);

	if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
		if (n_probes > CK_HS_WORD_MAX)
			n_probes = CK_HS_WORD_MAX;

		CK_HS_STORE(&m->probe_bound[offset], n_probes);
		ck_pr_fence_store();
	}

	return;
}

static inline unsigned int
ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h)
{
	unsigned long offset = h & m->mask;
	unsigned int r = CK_HS_WORD_MAX;

	if (m->probe_bound != NULL) {
		r = CK_HS_LOAD(&m->probe_bound[offset]);
		if (r == CK_HS_WORD_MAX)
			r = ck_pr_load_uint(&m->probe_maximum);
	} else {
		r = ck_pr_load_uint(&m->probe_maximum);
	}

	return r;
}

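/*
 * Allocate a map of at least the requested capacity and migrate all live
 * entries into it, doubling the target capacity if the probe limit is
 * hit during migration. The new map is published behind a store fence,
 * and the old map is freed through the deferred path so that concurrent
 * readers may drain first.
 */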
bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	unsigned long k, i, j, offset, probes;
	const void *previous, **bucket;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = CK_HS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		i = probes = 0;

		for (;;) {
			bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				if (probes++ == update->probe_limit)
					break;

				if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
					*cursor = map->entries[k];
					update->n_entries++;

					ck_hs_map_bound_set(update, h, probes);
					break;
				}
			}

			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
		}

		if (probes > update->probe_limit) {
			/*
			 * We have hit the probe limit; the map needs to be even larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}

static void
ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map)
{

	map->n_entries++;
	if ((map->n_entries << 1) > map->capacity)
		ck_hs_grow(hs, map->capacity << 1);

	return;
}

bool
ck_hs_rebuild(struct ck_hs *hs)
{

	return ck_hs_grow(hs, hs->map->capacity);
}

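/*
 * Core probe routine. Walk the probe sequence for h until the key is
 * found, an empty slot is reached or the probe limit is exhausted. On
 * return, *object is the matching entry (if any), the return value is
 * the slot it occupies and *priority is the earliest tombstone seen,
 * which callers may reuse to re-insert the key closer to its home
 * bucket.
 */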
static const void **
ck_hs_map_probe(struct ck_hs *hs,
    struct ck_hs_map *map,
    unsigned long *n_probes,
    const void ***priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_hs_probe_behavior behavior)
{
	const void **bucket, **cursor, *k, *compare;
	const void **pr = NULL;
	unsigned long offset, j, i, probes, opl;

#ifdef CK_HS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	unsigned long hv = 0;

	if (hs->mode & CK_HS_MODE_OBJECT) {
		hv = (h >> 25) & CK_HS_KEY_MASK;
		compare = CK_HS_VMA(key);
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	offset = h & map->mask;
	*object = NULL;
	i = probes = 0;

	opl = probe_limit;
	if (behavior == CK_HS_PROBE_INSERT)
		probe_limit = ck_hs_map_bound_get(map, h);

	for (;;) {
		bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));

		for (j = 0; j < CK_HS_PROBE_L1; j++) {
			cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

			if (probes++ == probe_limit) {
				if (probe_limit == opl || pr != NULL) {
					k = CK_HS_EMPTY;
					goto leave;
				}

				/*
				 * If no eligible slot has been found yet, continue probe
				 * sequence with original probe limit.
				 */
				probe_limit = opl;
			}

			k = ck_pr_load_ptr(cursor);
			if (k == CK_HS_EMPTY)
				goto leave;

			if (k == CK_HS_TOMBSTONE) {
				if (pr == NULL) {
					pr = cursor;
					*n_probes = probes;

					if (behavior == CK_HS_PROBE_TOMBSTONE) {
						k = CK_HS_EMPTY;
						goto leave;
					}
				}

				continue;
			}

#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT) {
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv)
					continue;

				k = CK_HS_VMA(k);
			}
#endif

			if (k == compare)
				goto leave;

			if (hs->compare == NULL)
				continue;

			if (hs->compare(k, key) == true)
				goto leave;
		}

		offset = ck_hs_map_probe_next(map, offset, h, i++, probes);
	}

leave:
	if (probes > probe_limit) {
		cursor = NULL;
	} else {
		*object = k;
	}

	if (pr == NULL)
		*n_probes = probes;

	*priority = pr;
	return cursor;
}

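/*
 * Encode a key for storage: with pointer packing in object mode, spare
 * high pointer bits cache a slice of the hash so that most mismatches
 * are rejected without dereferencing the candidate (see ck_hs_map_probe).
 */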
static inline const void *
ck_hs_marshal(unsigned int mode, const void *key, unsigned long h)
{
#ifdef CK_HS_PP
	const void *insert;

	if (mode & CK_HS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)CK_HS_VMA(key) |
		    ((h >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = key;
	}

	return insert;
#else
	(void)mode;
	(void)h;

	return key;
#endif
}

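/*
 * Reclaim tombstones by shifting entries toward the earliest reusable
 * slot on their probe sequence. A cycles value of 0 requests a full
 * scan, which additionally recomputes probe_maximum and, if present,
 * the per-bucket probe bounds; a non-zero value bounds the amount of
 * work performed.
 */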
bool
ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
{
	unsigned long size = 0;
	unsigned long i;
	struct ck_hs_map *map = hs->map;
	unsigned int maximum;
	CK_HS_WORD *bounds = NULL;

	if (map->n_entries == 0) {
		ck_pr_store_uint(&map->probe_maximum, 0);
		if (map->probe_bound != NULL)
			memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity);

		return true;
	}

	if (cycles == 0) {
		maximum = 0;

		if (map->probe_bound != NULL) {
			size = sizeof(CK_HS_WORD) * map->capacity;
			bounds = hs->m->malloc(size);
			if (bounds == NULL)
				return false;

			memset(bounds, 0, size);
		}
	} else {
		maximum = map->probe_maximum;
	}

	for (i = 0; i < map->capacity; i++) {
		const void **first, *object, **slot, *entry;
		unsigned long n_probes, offset, h;

		entry = map->entries[(i + seed) & map->mask];
		if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			entry = CK_HS_VMA(entry);
#endif

		h = hs->hf(entry, hs->seed);
		offset = h & map->mask;

		slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object,
		    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

		if (first != NULL) {
			const void *insert = ck_hs_marshal(hs->mode, entry, h);

			ck_pr_store_ptr(first, insert);
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}

		if (cycles == 0) {
			if (n_probes > maximum)
				maximum = n_probes;

			if (n_probes > CK_HS_WORD_MAX)
				n_probes = CK_HS_WORD_MAX;

			if (bounds != NULL && n_probes > bounds[offset])
				bounds[offset] = n_probes;
		} else if (--cycles == 0)
			break;
	}

	/*
	 * The following only apply to garbage collection involving
	 * a full scan of all entries.
	 */
	if (maximum != map->probe_maximum)
		ck_pr_store_uint(&map->probe_maximum, maximum);

	if (bounds != NULL) {
		for (i = 0; i < map->capacity; i++)
			CK_HS_STORE(&map->probe_bound[i], bounds[i]);

		hs->m->free(bounds, size, false);
	}

	return true;
}

bool
ck_hs_fas(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	*previous = NULL;
	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

	/* Replacement semantics presume existence. */
	if (object == NULL)
		return false;

	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		ck_pr_store_ptr(first, insert);
		ck_hs_map_signal(map, h);
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	} else {
		ck_pr_store_ptr(slot, insert);
	}

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

/*
 * An apply function takes two arguments. The first argument is a pointer to a
 * pre-existing object. The second argument is the opaque closure pointer (the
 * fifth argument passed to ck_hs_apply). If a non-NULL pointer is passed as
 * the first argument and the return value of the apply function is NULL, then
 * the pre-existing value is deleted. If the return pointer is the same as the
 * one passed to the apply function, then no changes are made to the hash set.
 * If the first argument is non-NULL and the return pointer differs from the
 * one passed to the apply function, then the pre-existing value is replaced.
 * For replacement, the new value must be logically identical to the previous
 * value (it must hash and compare as equal), as the slot is overwritten in
 * place and probe sequences are not recomputed.
 */
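
/*
 * A minimal sketch of an apply function implementing insert-if-absent
 * (illustrative only; "upsert" and "object" below are hypothetical names,
 * not part of this file):
 *
 *	static void *
 *	upsert(void *existing, void *closure)
 *	{
 *
 *		if (existing != NULL)
 *			return existing;  // Present: leave the set unchanged.
 *
 *		return closure;           // Absent: insert the closure value.
 *	}
 *
 *	r = ck_hs_apply(&hs, h, object, upsert, object);
 */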
bool
ck_hs_apply(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    ck_hs_apply_fn_t *fn,
    void *cl)
{
	const void **slot, **first, *object, *delta, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	delta = fn(CK_CC_DECONST_PTR(object), cl);
	if (delta == NULL) {
		/*
		 * The apply function has requested deletion. If the object doesn't exist,
		 * then exit early.
		 */
		if (CK_CC_UNLIKELY(object == NULL))
			return true;

		/* Otherwise, mark slot as deleted. */
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		map->n_entries--;
		map->tombstones++;
		return true;
	}

	/* The apply function has not requested hash set modification, so exit early. */
	if (delta == object)
		return true;

	/* A modification or insertion has been requested. */
	ck_hs_map_bound_set(map, h, n_probes);

	insert = ck_hs_marshal(hs->mode, delta, h);
	if (first != NULL) {
		/*
		 * This follows the same semantics as ck_hs_set; please refer to that
		 * function for documentation.
		 */
		ck_pr_store_ptr(first, insert);

		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	return true;
}

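/*
 * Insert key, replacing any existing entry that hashes and compares as
 * equal. *previous receives the displaced entry, or NULL if the key was
 * not present. Fails only if an allocation needed for growth fails.
 */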
bool
ck_hs_set(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

	*previous = NULL;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* If an earlier bucket was found, then store the entry there. */
		ck_pr_store_ptr(first, insert);

		/*
		 * If a duplicate key was found, then delete it after
		 * signaling concurrent probes to restart. Optionally,
		 * it is possible to install the tombstone after a grace
		 * period if we can guarantee the earlier position of the
		 * duplicate key.
		 */
		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

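/*
 * Common insertion path for ck_hs_put and ck_hs_put_unique. The behavior
 * argument controls tombstone handling: CK_HS_PROBE_INSERT probes up to
 * the current probe bound (far enough to detect a duplicate) before
 * reusing a tombstone, while CK_HS_PROBE_TOMBSTONE may short-circuit on
 * the first tombstone seen and is only safe when the caller guarantees
 * the key is not already present.
 */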
CK_CC_INLINE static bool
ck_hs_put_internal(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    enum ck_hs_probe_behavior behavior)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, behavior);

	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	/* Fail operation if a match was found. */
	if (object != NULL)
		return false;

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* Insert key into first bucket in probe sequence. */
		ck_pr_store_ptr(first, insert);
	} else {
		/* An empty slot was found. */
		ck_pr_store_ptr(slot, insert);
	}

	ck_hs_map_postinsert(hs, map);
	return true;
}

bool
ck_hs_put(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT);
}

bool
ck_hs_put_unique(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE);
}

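/*
 * Lock-free lookup for SPMC workloads: snapshot the relevant generation
 * counter, probe, and retry if a writer signalled during the probe (see
 * ck_hs_map_signal). The load fences order the counter reads around the
 * probe itself.
 */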
void *
ck_hs_get(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **first, *object;
	struct ck_hs_map *map;
	unsigned long n_probes;
	unsigned int g, g_p, probe;
	unsigned int *generation;

	do {
		map = ck_pr_load_ptr(&hs->map);
		generation = &map->generation[h & CK_HS_G_MASK];
		g = ck_pr_load_uint(generation);
		probe = ck_hs_map_bound_get(map, h);
		ck_pr_fence_load();

		ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE);

		ck_pr_fence_load();
		g_p = ck_pr_load_uint(generation);
	} while (g != g_p);

	return CK_CC_DECONST_PTR(object);
}

void *
ck_hs_remove(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **slot, **first, *object;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);
	if (object == NULL)
		return NULL;

	ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	map->n_entries--;
	map->tombstones++;
	return CK_CC_DECONST_PTR(object);
}

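/*
 * Adopt the map of source under new callbacks and allocator, without
 * copying entries. The caller is expected to guarantee that source is
 * no longer accessed through its old handle.
 */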
bool
ck_hs_move(struct ck_hs *hs,
    struct ck_hs *source,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->mode = source->mode;
	hs->seed = source->seed;
	hs->map = source->map;
	hs->m = m;
	hs->hf = hf;
	hs->compare = compare;
	return true;
}

bool
ck_hs_init(struct ck_hs *hs,
    unsigned int mode,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m,
    unsigned long n_entries,
    unsigned long seed)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->m = m;
	hs->mode = mode;
	hs->seed = seed;
	hs->hf = hf;
	hs->compare = compare;

	hs->map = ck_hs_map_create(hs, n_entries);
	return hs->map != NULL;
}
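
/*
 * Example usage (an illustrative sketch, not part of the library; the
 * allocator shim and string hash below are hypothetical):
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	static void *
 *	hs_malloc(size_t r)
 *	{
 *		return malloc(r);
 *	}
 *
 *	static void
 *	hs_free(void *p, size_t b, bool r)
 *	{
 *		(void)b;
 *		(void)r;
 *		free(p);
 *	}
 *
 *	static struct ck_malloc my_allocator = {
 *		.malloc = hs_malloc,
 *		.free = hs_free
 *	};
 *
 *	static unsigned long
 *	hs_hash(const void *object, unsigned long seed)
 *	{
 *		const char *c;
 *		unsigned long h = seed;
 *
 *		for (c = object; *c != '\0'; c++)
 *			h = (h * 131) + (unsigned char)*c;
 *
 *		return h;
 *	}
 *
 *	static bool
 *	hs_compare(const void *previous, const void *compare)
 *	{
 *		return strcmp(previous, compare) == 0;
 *	}
 *
 *	struct ck_hs hs;
 *	unsigned long h;
 *	bool present;
 *
 *	if (ck_hs_init(&hs, CK_HS_MODE_SPMC, hs_hash, hs_compare,
 *	    &my_allocator, 1024, 6602834) == false)
 *		abort();
 *
 *	h = hs_hash("horse", hs.seed);
 *	(void)ck_hs_put(&hs, h, "horse");
 *	present = ck_hs_get(&hs, h, "horse") != NULL;
 */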
964