/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_cc.h>
#include <ck_hs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_string.h>

#include "ck_internal.h"

#ifndef CK_HS_PROBE_L1_SHIFT
#define CK_HS_PROBE_L1_SHIFT 3ULL
#endif /* CK_HS_PROBE_L1_SHIFT */

#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)

#ifndef CK_HS_PROBE_L1_DEFAULT
#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif

#define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
#define CK_HS_VMA(x)	\
	((void *)((uintptr_t)(x) & CK_HS_VMA_MASK))

#define CK_HS_EMPTY     NULL
#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
#define CK_HS_G		(2)
#define CK_HS_G_MASK	(CK_HS_G - 1)

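/*
 * CK_HS_WORD is the narrowest unsigned type for which the platform
 * provides atomic loads and stores. It backs the per-slot probe
 * bounds maintained when CK_HS_MODE_DELETE is enabled.
 */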
#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
#define CK_HS_WORD          uint8_t
#define CK_HS_WORD_MAX	    UINT8_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_8(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_8(x)
#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
#define CK_HS_WORD          uint16_t
#define CK_HS_WORD_MAX	    UINT16_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_16(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_16(x)
#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
#define CK_HS_WORD          uint32_t
#define CK_HS_WORD_MAX	    UINT32_MAX
#define CK_HS_STORE(x, y)   ck_pr_store_32(x, y)
#define CK_HS_LOAD(x)       ck_pr_load_32(x)
#else
#error "ck_hs is not supported on your platform."
#endif

enum ck_hs_probe_behavior {
	CK_HS_PROBE = 0,	/* Default behavior. */
	CK_HS_PROBE_TOMBSTONE,	/* Short-circuit on tombstone. */
	CK_HS_PROBE_INSERT	/* Short-circuit on probe bound if tombstone found. */
};

struct ck_hs_map {
	unsigned int generation[CK_HS_G];
	unsigned int probe_maximum;
	unsigned long mask;
	unsigned long step;
	unsigned int probe_limit;
	unsigned int tombstones;
	unsigned long n_entries;
	unsigned long capacity;
	unsigned long size;
	CK_HS_WORD *probe_bound;
	const void **entries;
};

static inline void
ck_hs_map_signal(struct ck_hs_map *map, unsigned long h)
{

	h &= CK_HS_G_MASK;
	ck_pr_store_uint(&map->generation[h],
	    map->generation[h] + 1);
	ck_pr_fence_store();
	return;
}

void
ck_hs_iterator_init(struct ck_hs_iterator *iterator)
{

	iterator->cursor = NULL;
	iterator->offset = 0;
	return;
}

bool
ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{
	struct ck_hs_map *map = hs->map;
	void *value;

	if (i->offset >= map->capacity)
		return false;

	do {
		value = CK_CC_DECONST_PTR(map->entries[i->offset]);
		if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT)
				value = CK_HS_VMA(value);
#endif
			i->offset++;
			*key = value;
			return true;
		}
	} while (++i->offset < map->capacity);

	return false;
}

void
ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
{
	struct ck_hs_map *map = hs->map;

	st->n_entries = map->n_entries;
	st->tombstones = map->tombstones;
	st->probe_maximum = map->probe_maximum;
	return;
}

unsigned long
ck_hs_count(struct ck_hs *hs)
{

	return hs->map->n_entries;
}

static void
ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
{

	m->free(map, map->size, defer);
	return;
}

void
ck_hs_destroy(struct ck_hs *hs)
{

	ck_hs_map_destroy(hs->m, hs->map, false);
	return;
}

static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
	struct ck_hs_map *map;
	unsigned long size, n_entries, prefix, limit;

	n_entries = ck_internal_power_2(entries);
	if (n_entries < CK_HS_PROBE_L1)
		n_entries = CK_HS_PROBE_L1;

	size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);

	if (hs->mode & CK_HS_MODE_DELETE) {
		prefix = sizeof(CK_HS_WORD) * n_entries;
		size += prefix;
	} else {
		prefix = 0;
	}

	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;

	map->size = size;

	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_internal_bsf(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/* Align the entries array on a cache line boundary. */
	map->entries = (void *)(((uintptr_t)&map[1] + prefix +
	    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));

	memset(map->entries, 0, sizeof(void *) * n_entries);
	memset(map->generation, 0, sizeof map->generation);

	if (hs->mode & CK_HS_MODE_DELETE) {
		map->probe_bound = (CK_HS_WORD *)&map[1];
		memset(map->probe_bound, 0, prefix);
	} else {
		map->probe_bound = NULL;
	}

	/* Ensure the zeroed entries are visible before the map is published. */
	ck_pr_fence_store();
	return map;
}

bool
ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
{
	struct ck_hs_map *map, *previous;

	previous = hs->map;
	map = ck_hs_map_create(hs, capacity);
	if (map == NULL)
		return false;

	ck_pr_store_ptr(&hs->map, map);
	ck_hs_map_destroy(hs->m, previous, true);
	return true;
}

bool
ck_hs_reset(struct ck_hs *hs)
{
	struct ck_hs_map *previous;

	previous = hs->map;
	return ck_hs_reset_size(hs, previous->capacity);
}

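/*
 * Compute the next offset in the probe sequence. Probing is linear
 * within a group of CK_HS_PROBE_L1 contiguous slots; across groups,
 * the stride is derived from the hash bits above those that select
 * the initial bucket, so keys that collide on their initial bucket
 * tend to diverge on subsequent groups.
 */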
static inline unsigned long
ck_hs_map_probe_next(struct ck_hs_map *map,
    unsigned long offset,
    unsigned long h,
    unsigned long level,
    unsigned long probes)
{
	unsigned long r, stride;

	r = (h >> map->step) >> level;
	stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK);

	return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) +
	    (stride | CK_HS_PROBE_L1)) & map->mask;
}

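/*
 * Record the number of probes required to place the key hashing to h:
 * raise the map-wide probe_maximum and, when per-slot bounds are
 * maintained (delete mode), the bound stored at the key's initial
 * bucket. Per-slot bounds saturate at CK_HS_WORD_MAX, which readers
 * treat as "fall back to probe_maximum".
 */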
static inline void
ck_hs_map_bound_set(struct ck_hs_map *m,
    unsigned long h,
    unsigned long n_probes)
{
	unsigned long offset = h & m->mask;

	if (n_probes > m->probe_maximum)
		ck_pr_store_uint(&m->probe_maximum, n_probes);

	if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
		if (n_probes > CK_HS_WORD_MAX)
			n_probes = CK_HS_WORD_MAX;

		CK_HS_STORE(&m->probe_bound[offset], n_probes);
		ck_pr_fence_store();
	}

	return;
}

static inline unsigned int
ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h)
{
	unsigned long offset = h & m->mask;
	unsigned int r = CK_HS_WORD_MAX;

	if (m->probe_bound != NULL) {
		r = CK_HS_LOAD(&m->probe_bound[offset]);
		if (r == CK_HS_WORD_MAX)
			r = ck_pr_load_uint(&m->probe_maximum);
	} else {
		r = ck_pr_load_uint(&m->probe_maximum);
	}

	return r;
}

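/*
 * Grow the set to at least `capacity` slots, migrating every live
 * entry into a freshly allocated map. If an entry cannot be placed
 * within the new map's probe limit, the new map is discarded and the
 * attempt restarts at twice the capacity. The previous map is
 * released through the deferred free path so that concurrent readers
 * may drain first.
 */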
bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	unsigned long k, i, j, offset, probes;
	const void *previous, **bucket;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = CK_HS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		i = probes = 0;

		for (;;) {
			bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				if (probes++ == update->probe_limit)
					break;

				if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
					*cursor = map->entries[k];
					update->n_entries++;

					ck_hs_map_bound_set(update, h, probes);
					break;
				}
			}

			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
		}

		if (probes > update->probe_limit) {
			/*
			 * We have hit the probe limit: the map needs to be even larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}

static void
ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map)
{

	map->n_entries++;
	if ((map->n_entries << 1) > map->capacity)
		ck_hs_grow(hs, map->capacity << 1);

	return;
}

bool
ck_hs_rebuild(struct ck_hs *hs)
{

	return ck_hs_grow(hs, hs->map->capacity);
}

static const void **
ck_hs_map_probe(struct ck_hs *hs,
    struct ck_hs_map *map,
    unsigned long *n_probes,
    const void ***priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_hs_probe_behavior behavior)
{
	const void **bucket, **cursor, *k, *compare;
	const void **pr = NULL;
	unsigned long offset, j, i, probes, opl;

#ifdef CK_HS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	unsigned long hv = 0;

	if (hs->mode & CK_HS_MODE_OBJECT) {
		hv = (h >> 25) & CK_HS_KEY_MASK;
		compare = CK_HS_VMA(key);
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	offset = h & map->mask;
	*object = NULL;
	i = probes = 0;

	opl = probe_limit;
	if (behavior == CK_HS_PROBE_INSERT)
		probe_limit = ck_hs_map_bound_get(map, h);

	for (;;) {
		bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));

		for (j = 0; j < CK_HS_PROBE_L1; j++) {
			cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

			if (probes++ == probe_limit) {
				if (probe_limit == opl || pr != NULL) {
					k = CK_HS_EMPTY;
					goto leave;
				}

				/*
				 * If no eligible slot has been found yet, continue the
				 * probe sequence with the original probe limit.
				 */
				probe_limit = opl;
			}

			k = ck_pr_load_ptr(cursor);
			if (k == CK_HS_EMPTY)
				goto leave;

			if (k == CK_HS_TOMBSTONE) {
				if (pr == NULL) {
					pr = cursor;
					*n_probes = probes;

					if (behavior == CK_HS_PROBE_TOMBSTONE) {
						k = CK_HS_EMPTY;
						goto leave;
					}
				}

				continue;
			}

#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT) {
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv)
					continue;

				k = CK_HS_VMA(k);
			}
#endif

			if (k == compare)
				goto leave;

			if (hs->compare == NULL)
				continue;

			if (hs->compare(k, key) == true)
				goto leave;
		}

		offset = ck_hs_map_probe_next(map, offset, h, i++, probes);
	}

leave:
	if (probes > probe_limit) {
		cursor = NULL;
	} else {
		*object = k;
	}

	if (pr == NULL)
		*n_probes = probes;

	*priority = pr;
	return cursor;
}

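/*
 * With CK_HS_PP, a slice of the hash's upper bits is packed into the
 * otherwise-unused upper bits of the object pointer. Probe loops
 * compare these packed bits first, rejecting most non-matching
 * entries without dereferencing them.
 */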
static inline const void *
ck_hs_marshal(unsigned int mode, const void *key, unsigned long h)
{
#ifdef CK_HS_PP
	const void *insert;

	if (mode & CK_HS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)CK_HS_VMA(key) |
		    ((h >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = key;
	}

	return insert;
#else
	(void)mode;
	(void)h;

	return key;
#endif
}

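/*
 * Garbage collection scans up to `cycles` occupied slots (all of them
 * if cycles is 0), re-homing each entry to the earliest tombstone in
 * its probe sequence. A full scan additionally recomputes
 * probe_maximum and, if present, the per-slot probe bounds. `seed`
 * rotates the starting position so that successive partial scans
 * cover different regions of the map.
 */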
bool
ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
{
	unsigned long size = 0;
	unsigned long i;
	struct ck_hs_map *map = hs->map;
	unsigned int maximum;
	CK_HS_WORD *bounds = NULL;

	if (map->n_entries == 0) {
		ck_pr_store_uint(&map->probe_maximum, 0);
		if (map->probe_bound != NULL)
			memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity);

		return true;
	}

	if (cycles == 0) {
		maximum = 0;

		if (map->probe_bound != NULL) {
			size = sizeof(CK_HS_WORD) * map->capacity;
			bounds = hs->m->malloc(size);
			if (bounds == NULL)
				return false;

			memset(bounds, 0, size);
		}
	} else {
		maximum = map->probe_maximum;
	}

	for (i = 0; i < map->capacity; i++) {
		const void **first, *object, **slot, *entry;
		unsigned long n_probes, offset, h;

		entry = map->entries[(i + seed) & map->mask];
		if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			entry = CK_HS_VMA(entry);
#endif

		h = hs->hf(entry, hs->seed);
		offset = h & map->mask;

		slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object,
		    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

		if (first != NULL) {
			const void *insert = ck_hs_marshal(hs->mode, entry, h);

			ck_pr_store_ptr(first, insert);
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}

		if (cycles == 0) {
			if (n_probes > maximum)
				maximum = n_probes;

			if (n_probes > CK_HS_WORD_MAX)
				n_probes = CK_HS_WORD_MAX;

			if (bounds != NULL && n_probes > bounds[offset])
				bounds[offset] = n_probes;
		} else if (--cycles == 0)
			break;
	}

	/*
	 * The following steps only apply to a garbage collection pass
	 * involving a full scan of all entries.
	 */
	if (maximum != map->probe_maximum)
		ck_pr_store_uint(&map->probe_maximum, maximum);

	if (bounds != NULL) {
		for (i = 0; i < map->capacity; i++)
			CK_HS_STORE(&map->probe_bound[i], bounds[i]);

		hs->m->free(bounds, size, false);
	}

	return true;
}

bool
ck_hs_fas(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	*previous = NULL;
	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

	/* Replacement semantics presume existence. */
	if (object == NULL)
		return false;

	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		ck_pr_store_ptr(first, insert);
		ck_hs_map_signal(map, h);
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	} else {
		ck_pr_store_ptr(slot, insert);
	}

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

/*
 * An apply function takes two arguments. The first argument is a pointer to a
 * pre-existing object. The second argument is the fifth argument passed to
 * ck_hs_apply (an opaque closure pointer). If a non-NULL pointer is passed to
 * the first argument and the return value of the apply function is NULL, then
 * the pre-existing value is deleted. If the return pointer is the same as the
 * one passed to the apply function, then no changes are made to the hash
 * table. If the first argument is non-NULL and the return pointer is
 * different from that passed to the apply function, then the pre-existing
 * value is replaced. For replacement, the new value must be equivalent to the
 * previous value with respect to the set's hash and comparison functions.
 */
bool
ck_hs_apply(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    ck_hs_apply_fn_t *fn,
    void *cl)
{
	const void **slot, **first, *object, *delta, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	delta = fn(CK_CC_DECONST_PTR(object), cl);
	if (delta == NULL) {
		/*
		 * The apply function has requested deletion. If the object doesn't exist,
		 * then exit early.
		 */
		if (CK_CC_UNLIKELY(object == NULL))
			return true;

		/* Otherwise, mark the slot as deleted. */
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		map->n_entries--;
		map->tombstones++;
		return true;
	}

	/* The apply function has not requested hash set modification, so exit early. */
	if (delta == object)
		return true;

	/* A modification or insertion has been requested. */
	ck_hs_map_bound_set(map, h, n_probes);

	insert = ck_hs_marshal(hs->mode, delta, h);
	if (first != NULL) {
		/*
		 * This follows the same semantics as ck_hs_set; please refer to that
		 * function for documentation.
		 */
		ck_pr_store_ptr(first, insert);

		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	return true;
}

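/*
 * An illustrative sketch of an apply function (not part of this file;
 * `upsert`, `hash_fn` and `object` are hypothetical caller-side
 * names): insert the object passed through the closure argument when
 * no entry exists, and leave any existing entry unchanged.
 *
 *	static void *
 *	upsert(void *existing, void *cl)
 *	{
 *
 *		if (existing != NULL)
 *			return existing;	(no modification requested)
 *
 *		return cl;			(request insertion)
 *	}
 *
 *	ck_hs_apply(&hs, CK_HS_HASH(&hs, hash_fn, object), object,
 *	    upsert, object);
 */
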
bool
ck_hs_set(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

	*previous = NULL;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* If an earlier bucket was found, then store the entry there. */
		ck_pr_store_ptr(first, insert);

		/*
		 * If a duplicate key was found, then delete it after
		 * signaling concurrent probes to restart. Alternatively,
		 * the tombstone could be installed after a grace period
		 * if the duplicate key's earlier position in the probe
		 * sequence can be guaranteed.
		 */
		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

CK_CC_INLINE static bool
ck_hs_put_internal(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    enum ck_hs_probe_behavior behavior)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, behavior);

	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	/* Fail operation if a match was found. */
	if (object != NULL)
		return false;

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* Insert key into first bucket in probe sequence. */
		ck_pr_store_ptr(first, insert);
	} else {
		/* An empty slot was found. */
		ck_pr_store_ptr(slot, insert);
	}

	ck_hs_map_postinsert(hs, map);
	return true;
}

bool
ck_hs_put(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT);
}

bool
ck_hs_put_unique(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE);
}

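/*
 * Lookups are lock-free with respect to writers: an entry may be
 * republished at an earlier position in its probe sequence while a
 * reader is scanning. The generation counter for the hash class is
 * sampled before and after the probe; if it changed, a relocation may
 * have been missed, so the lookup is retried.
 */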
void *
ck_hs_get(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **first, *object;
	struct ck_hs_map *map;
	unsigned long n_probes;
	unsigned int g, g_p, probe;
	unsigned int *generation;

	do {
		map = ck_pr_load_ptr(&hs->map);
		generation = &map->generation[h & CK_HS_G_MASK];
		g = ck_pr_load_uint(generation);
		probe = ck_hs_map_bound_get(map, h);
		ck_pr_fence_load();

		ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE);

		ck_pr_fence_load();
		g_p = ck_pr_load_uint(generation);
	} while (g != g_p);

	return CK_CC_DECONST_PTR(object);
}

void *
ck_hs_remove(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **slot, **first, *object;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);
	if (object == NULL)
		return NULL;

	ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	map->n_entries--;
	map->tombstones++;
	return CK_CC_DECONST_PTR(object);
}

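/*
 * Transplant the state of `source` into `hs` with a new allocator,
 * hash function and comparator. The underlying map is adopted by
 * reference rather than copied.
 */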
bool
ck_hs_move(struct ck_hs *hs,
    struct ck_hs *source,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->mode = source->mode;
	hs->seed = source->seed;
	hs->map = source->map;
	hs->m = m;
	hs->hf = hf;
	hs->compare = compare;
	return true;
}

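/*
 * An illustrative initialization sketch (hash_fn, cmp_fn and
 * allocator are assumed to be supplied by the caller):
 *
 *	struct ck_hs hs;
 *
 *	if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
 *	    hash_fn, cmp_fn, &allocator, 1024, lrand48()) == false)
 *		abort();
 */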
bool
ck_hs_init(struct ck_hs *hs,
    unsigned int mode,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m,
    unsigned long n_entries,
    unsigned long seed)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->m = m;
	hs->mode = mode;
	hs->seed = seed;
	hs->hf = hf;
	hs->compare = compare;

	hs->map = ck_hs_map_create(hs, n_entries);
	return hs->map != NULL;
}