/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_cc.h>
#include <ck_hs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_string.h>

#include "ck_internal.h"

#ifndef CK_HS_PROBE_L1_SHIFT
#define CK_HS_PROBE_L1_SHIFT 3ULL
#endif /* CK_HS_PROBE_L1_SHIFT */

#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)

#ifndef CK_HS_PROBE_L1_DEFAULT
#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif

#define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
#define CK_HS_VMA(x)	\
	((void *)((uintptr_t)(x) & CK_HS_VMA_MASK))

#define CK_HS_EMPTY     NULL
#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
#define CK_HS_G		(2)
#define CK_HS_G_MASK	(CK_HS_G - 1)

#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
#define CK_HS_WORD          uint8_t
#define CK_HS_WORD_MAX	    UINT8_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_8(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_8(x)
#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
#define CK_HS_WORD          uint16_t
#define CK_HS_WORD_MAX	    UINT16_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_16(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_16(x)
#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
#define CK_HS_WORD          uint32_t
#define CK_HS_WORD_MAX	    UINT32_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_32(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_32(x)
#else
#error "ck_hs is not supported on your platform."
#endif

enum ck_hs_probe_behavior {
	CK_HS_PROBE = 0,	/* Default behavior. */
	CK_HS_PROBE_TOMBSTONE,	/* Short-circuit on tombstone. */
	CK_HS_PROBE_INSERT	/* Short-circuit on probe bound if tombstone found. */
};

struct ck_hs_map {
	unsigned int generation[CK_HS_G];
	unsigned int probe_maximum;
	unsigned long mask;
	unsigned long step;
	unsigned int probe_limit;
	unsigned int tombstones;
	unsigned long n_entries;
	unsigned long capacity;
	unsigned long size;
	CK_HS_WORD *probe_bound;
	const void **entries;
};

static inline void
ck_hs_map_signal(struct ck_hs_map *map, unsigned long h)
{

	h &= CK_HS_G_MASK;
	ck_pr_store_uint(&map->generation[h],
	    map->generation[h] + 1);
	ck_pr_fence_store();
	return;
}

static bool
_ck_hs_next(struct ck_hs *hs, struct ck_hs_map *map, struct ck_hs_iterator *i, void **key)
{
	void *value;

	if (i->offset >= map->capacity)
		return false;

	do {
		value = CK_CC_DECONST_PTR(map->entries[i->offset]);
		if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT)
				value = CK_HS_VMA(value);
#else
			(void)hs; /* Avoid unused parameter warning. */
#endif
			i->offset++;
			*key = value;
			return true;
		}
	} while (++i->offset < map->capacity);

	return false;
}

void
ck_hs_iterator_init(struct ck_hs_iterator *iterator)
{

	iterator->cursor = NULL;
	iterator->offset = 0;
	iterator->map = NULL;
	return;
}

bool
ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{

	return _ck_hs_next(hs, hs->map, i, key);
}

bool
ck_hs_next_spmc(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{
	struct ck_hs_map *m = i->map;

	if (m == NULL) {
		m = i->map = ck_pr_load_ptr(&hs->map);
	}

	return _ck_hs_next(hs, m, i, key);
}
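
/*
 * Usage sketch (added commentary, not part of the original file): iterating
 * over a set. ck_hs_next assumes no concurrent writer; ck_hs_next_spmc
 * caches the map pointer in the iterator so a reader may iterate while a
 * single writer is active, at the cost of possibly walking a stale snapshot
 * after a resize. Assumes hs is an initialized ck_hs storing C strings.
 *
 *	struct ck_hs_iterator it;
 *	void *k;
 *
 *	ck_hs_iterator_init(&it);
 *	while (ck_hs_next(&hs, &it, &k) == true)
 *		puts((const char *)k);
 */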

void
ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
{
	struct ck_hs_map *map = hs->map;

	st->n_entries = map->n_entries;
	st->tombstones = map->tombstones;
	st->probe_maximum = map->probe_maximum;
	return;
}

unsigned long
ck_hs_count(struct ck_hs *hs)
{

	return hs->map->n_entries;
}

static void
ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
{

	m->free(map, map->size, defer);
	return;
}

void
ck_hs_destroy(struct ck_hs *hs)
{

	ck_hs_map_destroy(hs->m, hs->map, false);
	return;
}

static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
	struct ck_hs_map *map;
	unsigned long size, n_entries, prefix, limit;

	n_entries = ck_internal_power_2(entries);
	if (n_entries < CK_HS_PROBE_L1)
		n_entries = CK_HS_PROBE_L1;

	size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);

	if (hs->mode & CK_HS_MODE_DELETE) {
		prefix = sizeof(CK_HS_WORD) * n_entries;
		size += prefix;
	} else {
		prefix = 0;
	}

	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;

	map->size = size;

	/* We should probably use a more intelligent heuristic for the default probe length. */
	limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_cc_ffsl(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/* Align map allocation to cache line. */
	map->entries = (void *)(((uintptr_t)&map[1] + prefix +
	    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));

	memset(map->entries, 0, sizeof(void *) * n_entries);
	memset(map->generation, 0, sizeof map->generation);

	if (hs->mode & CK_HS_MODE_DELETE) {
		map->probe_bound = (CK_HS_WORD *)&map[1];
		memset(map->probe_bound, 0, prefix);
	} else {
		map->probe_bound = NULL;
	}

	/* Ensure the zeroed entries are committed before the map is published. */
	ck_pr_fence_store();
	return map;
}

bool
ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
{
	struct ck_hs_map *map, *previous;

	previous = hs->map;
	map = ck_hs_map_create(hs, capacity);
	if (map == NULL)
		return false;

	ck_pr_store_ptr(&hs->map, map);
	ck_hs_map_destroy(hs->m, previous, true);
	return true;
}

bool
ck_hs_reset(struct ck_hs *hs)
{
	struct ck_hs_map *previous;

	previous = hs->map;
	return ck_hs_reset_size(hs, previous->capacity);
}

static inline unsigned long
ck_hs_map_probe_next(struct ck_hs_map *map,
    unsigned long offset,
    unsigned long h,
    unsigned long level,
    unsigned long probes)
{
	unsigned long r, stride;

	r = (h >> map->step) >> level;
	stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK);

	return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) +
	    (stride | CK_HS_PROBE_L1)) & map->mask;
}
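
/*
 * Illustrative note (added commentary, not from the original author): the
 * first CK_HS_PROBE_L1 probes walk the slots sharing the home cache line;
 * each subsequent level derives a per-key stride from the hash bits above
 * the bucket index. Shifting the bits above CK_HS_PROBE_L1_MASK left by one
 * leaves bit CK_HS_PROBE_L1_SHIFT clear, so (stride | CK_HS_PROBE_L1) is
 * always at least one full L1 group, moving the next probe window onto a
 * different cache line.
 */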

static inline void
ck_hs_map_bound_set(struct ck_hs_map *m,
    unsigned long h,
    unsigned long n_probes)
{
	unsigned long offset = h & m->mask;

	if (n_probes > m->probe_maximum)
		ck_pr_store_uint(&m->probe_maximum, n_probes);

	if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
		if (n_probes > CK_HS_WORD_MAX)
			n_probes = CK_HS_WORD_MAX;

		CK_HS_STORE(&m->probe_bound[offset], n_probes);
		ck_pr_fence_store();
	}

	return;
}

static inline unsigned int
ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h)
{
	unsigned long offset = h & m->mask;
	unsigned int r = CK_HS_WORD_MAX;

	if (m->probe_bound != NULL) {
		r = CK_HS_LOAD(&m->probe_bound[offset]);
		if (r == CK_HS_WORD_MAX)
			r = ck_pr_load_uint(&m->probe_maximum);
	} else {
		r = ck_pr_load_uint(&m->probe_maximum);
	}

	return r;
}

bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	unsigned long k, i, j, offset, probes;
	const void *previous, **bucket;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = CK_HS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		i = probes = 0;

		for (;;) {
			bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				if (probes++ == update->probe_limit)
					break;

				if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
					*cursor = map->entries[k];
					update->n_entries++;

					ck_hs_map_bound_set(update, h, probes);
					break;
				}
			}

			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
		}

		if (probes > update->probe_limit) {
			/*
			 * We have hit the probe limit; the map needs to be
			 * even larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}

static void
ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map)
{

	map->n_entries++;
	if ((map->n_entries << 1) > map->capacity)
		ck_hs_grow(hs, map->capacity << 1);

	return;
}

bool
ck_hs_rebuild(struct ck_hs *hs)
{

	return ck_hs_grow(hs, hs->map->capacity);
}

static const void **
ck_hs_map_probe(struct ck_hs *hs,
    struct ck_hs_map *map,
    unsigned long *n_probes,
    const void ***priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_hs_probe_behavior behavior)
{
	const void **bucket, **cursor, *k, *compare;
	const void **pr = NULL;
	unsigned long offset, j, i, probes, opl;

#ifdef CK_HS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	unsigned long hv = 0;

	if (hs->mode & CK_HS_MODE_OBJECT) {
		hv = (h >> 25) & CK_HS_KEY_MASK;
		compare = CK_HS_VMA(key);
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	offset = h & map->mask;
	*object = NULL;
	i = probes = 0;

	opl = probe_limit;
	if (behavior == CK_HS_PROBE_INSERT)
		probe_limit = ck_hs_map_bound_get(map, h);

	for (;;) {
		bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));

		for (j = 0; j < CK_HS_PROBE_L1; j++) {
			cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

			if (probes++ == probe_limit) {
				if (probe_limit == opl || pr != NULL) {
					k = CK_HS_EMPTY;
					goto leave;
				}

				/*
				 * If no eligible slot has been found yet,
				 * continue the probe sequence with the
				 * original probe limit.
				 */
				probe_limit = opl;
			}

			k = ck_pr_load_ptr(cursor);
			if (k == CK_HS_EMPTY)
				goto leave;

			if (k == CK_HS_TOMBSTONE) {
				if (pr == NULL) {
					pr = cursor;
					*n_probes = probes;

					if (behavior == CK_HS_PROBE_TOMBSTONE) {
						k = CK_HS_EMPTY;
						goto leave;
					}
				}

				continue;
			}

#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT) {
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv)
					continue;

				k = CK_HS_VMA(k);
			}
#endif

			if (k == compare)
				goto leave;

			if (hs->compare == NULL)
				continue;

			if (hs->compare(k, key) == true)
				goto leave;
		}

		offset = ck_hs_map_probe_next(map, offset, h, i++, probes);
	}

leave:
	if (probes > probe_limit) {
		cursor = NULL;
	} else {
		*object = k;
	}

	if (pr == NULL)
		*n_probes = probes;

	*priority = pr;
	return cursor;
}

static inline const void *
ck_hs_marshal(unsigned int mode, const void *key, unsigned long h)
{
#ifdef CK_HS_PP
	const void *insert;

	if (mode & CK_HS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)CK_HS_VMA(key) |
		    ((h >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = key;
	}

	return insert;
#else
	(void)mode;
	(void)h;

	return key;
#endif
}
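
/*
 * Illustrative note (added commentary, inferred from the CK_HS_PP paths
 * above): under CK_HS_MODE_OBJECT the bits of a stored pointer above
 * CK_MD_VMA_BITS are unused by the address space, so ck_hs_marshal caches
 * hash bits 25 and up there. ck_hs_map_probe can then reject most
 * non-matching entries by comparing tags alone, and CK_HS_VMA() strips the
 * tag before any dereference or key comparison.
 */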

bool
ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
{
	unsigned long size = 0;
	unsigned long i;
	struct ck_hs_map *map = hs->map;
	unsigned int maximum;
	CK_HS_WORD *bounds = NULL;

	if (map->n_entries == 0) {
		ck_pr_store_uint(&map->probe_maximum, 0);
		if (map->probe_bound != NULL)
			memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity);

		return true;
	}

	if (cycles == 0) {
		maximum = 0;

		if (map->probe_bound != NULL) {
			size = sizeof(CK_HS_WORD) * map->capacity;
			bounds = hs->m->malloc(size);
			if (bounds == NULL)
				return false;

			memset(bounds, 0, size);
		}
	} else {
		maximum = map->probe_maximum;
	}

	for (i = 0; i < map->capacity; i++) {
		const void **first, *object, **slot, *entry;
		unsigned long n_probes, offset, h;

		entry = map->entries[(i + seed) & map->mask];
		if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			entry = CK_HS_VMA(entry);
#endif

		h = hs->hf(entry, hs->seed);
		offset = h & map->mask;

		slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object,
		    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

		if (first != NULL) {
			const void *insert = ck_hs_marshal(hs->mode, entry, h);

			ck_pr_store_ptr(first, insert);
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}

		if (cycles == 0) {
			if (n_probes > maximum)
				maximum = n_probes;

			if (n_probes > CK_HS_WORD_MAX)
				n_probes = CK_HS_WORD_MAX;

			if (bounds != NULL && n_probes > bounds[offset])
				bounds[offset] = n_probes;
		} else if (--cycles == 0)
			break;
	}

	/*
	 * The following only applies to garbage collection involving
	 * a full scan of all entries.
	 */
	if (maximum != map->probe_maximum)
		ck_pr_store_uint(&map->probe_maximum, maximum);

	if (bounds != NULL) {
		for (i = 0; i < map->capacity; i++)
			CK_HS_STORE(&map->probe_bound[i], bounds[i]);

		hs->m->free(bounds, size, false);
	}

	return true;
}
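
/*
 * Usage sketch (added commentary, assuming a single-writer context): with
 * cycles == 0 the whole map is scanned, live entries are pulled toward
 * earlier tombstones and probe bounds are recomputed; a non-zero cycles
 * bounds the pass to roughly that many live entries, with seed choosing
 * where the scan starts.
 *
 *	ck_hs_gc(&hs, 0, 0);		(full pass)
 *	ck_hs_gc(&hs, 128, offset);	(incremental pass from a caller cursor)
 */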

bool
ck_hs_fas(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	*previous = NULL;
	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

	/* Replacement semantics presume existence. */
	if (object == NULL)
		return false;

	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		ck_pr_store_ptr(first, insert);
		ck_hs_map_signal(map, h);
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	} else {
		ck_pr_store_ptr(slot, insert);
	}

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}
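
/*
 * Usage sketch (added; hash_fn and seed stand for the callbacks given to
 * ck_hs_init, release is a hypothetical reclamation routine): ck_hs_fas
 * swaps an existing key for an equivalent one and fails when no match
 * exists, so the caller can reclaim the displaced value.
 *
 *	void *old;
 *
 *	if (ck_hs_fas(&hs, hash_fn(update, seed), update, &old) == true)
 *		release(old);
 */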

/*
 * An apply function takes two arguments. The first argument is a pointer to
 * a pre-existing object; it is NULL if no such object exists. The second
 * argument is the opaque pointer passed as the fifth argument to
 * ck_hs_apply. If a non-NULL pointer is passed as the first argument and the
 * apply function returns NULL, then the pre-existing value is deleted. If
 * the returned pointer is the same as the one passed to the apply function,
 * then no changes are made to the hash set. If the first argument is
 * non-NULL and the returned pointer differs from the one passed in, then the
 * pre-existing value is replaced. For replacement, the new value must be
 * identical to the previous value with respect to the set's hash function
 * and comparator.
 */
bool
ck_hs_apply(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    ck_hs_apply_fn_t *fn,
    void *cl)
{
	const void **slot, **first, *object, *delta, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	delta = fn(CK_CC_DECONST_PTR(object), cl);
	if (delta == NULL) {
		/*
		 * The apply function has requested deletion. If the object
		 * doesn't exist, then exit early.
		 */
		if (CK_CC_UNLIKELY(object == NULL))
			return true;

		/* Otherwise, mark the slot as deleted. */
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		map->n_entries--;
		map->tombstones++;
		return true;
	}

	/* The apply function has not requested hash set modification, so exit early. */
	if (delta == object)
		return true;

	/* A modification or insertion has been requested. */
	ck_hs_map_bound_set(map, h, n_probes);

	insert = ck_hs_marshal(hs->mode, delta, h);
	if (first != NULL) {
		/*
		 * This follows the same semantics as ck_hs_set; please refer
		 * to that function for documentation.
		 */
		ck_pr_store_ptr(first, insert);

		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store
		 * is sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	return true;
}
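
/*
 * Example apply function (added sketch; the semantics follow the contract
 * documented above, but upsert and default_object are hypothetical names):
 *
 *	static void *
 *	upsert(void *existing, void *cl)
 *	{
 *
 *		if (existing != NULL)
 *			return existing;	(leave the set unchanged)
 *
 *		return cl;			(insert the caller's object)
 *	}
 *
 *	ck_hs_apply(&hs, hash_fn(default_object, seed), default_object,
 *	    upsert, default_object);
 */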

bool
ck_hs_set(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

	*previous = NULL;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* If an earlier bucket was found, then store the entry there. */
		ck_pr_store_ptr(first, insert);

		/*
		 * If a duplicate key was found, then delete it after
		 * signaling concurrent probes to restart. Optionally, the
		 * tombstone may be installed after a grace period if the
		 * duplicate key is guaranteed to occupy an earlier position
		 * in the probe sequence.
		 */
		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store
		 * is sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}
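
/*
 * Usage sketch (added; release is a hypothetical reclamation routine):
 * ck_hs_set upserts, returning any displaced equivalent key via *previous.
 *
 *	void *prev;
 *
 *	if (ck_hs_set(&hs, hash_fn(object, seed), object, &prev) == false)
 *		abort();	(allocation failure during resize)
 *	else if (prev != NULL)
 *		release(prev);
 */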

CK_CC_INLINE static bool
ck_hs_put_internal(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    enum ck_hs_probe_behavior behavior)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, behavior);

	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	/* Fail the operation if a match was found. */
	if (object != NULL)
		return false;

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* Insert key into first bucket in probe sequence. */
		ck_pr_store_ptr(first, insert);
	} else {
		/* An empty slot was found. */
		ck_pr_store_ptr(slot, insert);
	}

	ck_hs_map_postinsert(hs, map);
	return true;
}

bool
ck_hs_put(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT);
}

bool
ck_hs_put_unique(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE);
}
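
/*
 * Usage note (added commentary): ck_hs_put fails if an equivalent key is
 * already present. ck_hs_put_unique is intended for callers that can
 * guarantee the key is absent; it short-circuits on the first tombstone
 * instead of probing the full bound for a duplicate.
 *
 *	if (ck_hs_put(&hs, hash_fn(object, seed), object) == false)
 *		...	(duplicate key, or resize allocation failure)
 */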

void *
ck_hs_get(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **first, *object;
	struct ck_hs_map *map;
	unsigned long n_probes;
	unsigned int g, g_p, probe;
	unsigned int *generation;

	do {
		map = ck_pr_load_ptr(&hs->map);
		generation = &map->generation[h & CK_HS_G_MASK];
		g = ck_pr_load_uint(generation);
		probe = ck_hs_map_bound_get(map, h);
		ck_pr_fence_load();

		ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE);

		ck_pr_fence_load();
		g_p = ck_pr_load_uint(generation);
	} while (g != g_p);

	return CK_CC_DECONST_PTR(object);
}
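
/*
 * Usage note (added commentary): ck_hs_get is the SPMC read path. The
 * generation counter is bumped by writers (ck_hs_map_signal) whenever an
 * entry is relocated, so a lookup that raced with a relocation simply
 * retries against a consistent snapshot.
 *
 *	void *r = ck_hs_get(&hs, hash_fn(key, seed), key);
 */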

void *
ck_hs_remove(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **slot, **first, *object;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);
	if (object == NULL)
		return NULL;

	ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	map->n_entries--;
	map->tombstones++;
	return CK_CC_DECONST_PTR(object);
}

bool
ck_hs_move(struct ck_hs *hs,
    struct ck_hs *source,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->mode = source->mode;
	hs->seed = source->seed;
	hs->map = source->map;
	hs->m = m;
	hs->hf = hf;
	hs->compare = compare;
	return true;
}

bool
ck_hs_init(struct ck_hs *hs,
    unsigned int mode,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m,
    unsigned long n_entries,
    unsigned long seed)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->m = m;
	hs->mode = mode;
	hs->seed = seed;
	hs->hf = hf;
	hs->compare = compare;

	hs->map = ck_hs_map_create(hs, n_entries);
	return hs->map != NULL;
}
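
/*
 * Initialization sketch (added; the callback signatures are dictated by
 * struct ck_malloc and ck_hs, but the bodies and names below are
 * assumptions):
 *
 *	#include <stdlib.h>
 *
 *	static void *
 *	hs_malloc(size_t r)
 *	{
 *
 *		return malloc(r);
 *	}
 *
 *	static void
 *	hs_free(void *p, size_t b, bool r)
 *	{
 *
 *		(void)b;
 *		(void)r;
 *		free(p);
 *	}
 *
 *	static struct ck_malloc allocator = {
 *		.malloc = hs_malloc,
 *		.free = hs_free
 *	};
 *
 *	ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC, hash_fn,
 *	    compare_fn, &allocator, 1024, 42);
 */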
959