/*
 * Copyright 2014-2015 Olivier Houchard.
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_cc.h>
#include <ck_rhs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_string.h>

#include "ck_internal.h"

#ifndef CK_RHS_PROBE_L1_SHIFT
#define CK_RHS_PROBE_L1_SHIFT 3ULL
#endif /* CK_RHS_PROBE_L1_SHIFT */

#define CK_RHS_PROBE_L1 (1 << CK_RHS_PROBE_L1_SHIFT)
#define CK_RHS_PROBE_L1_MASK (CK_RHS_PROBE_L1 - 1)

#ifndef CK_RHS_PROBE_L1_DEFAULT
#define CK_RHS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif

#define CK_RHS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
#define CK_RHS_VMA(x)	\
	((void *)((uintptr_t)(x) & CK_RHS_VMA_MASK))

#define CK_RHS_EMPTY     NULL
#define CK_RHS_G		(1024)
#define CK_RHS_G_MASK	(CK_RHS_G - 1)

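/*
 * Per-bucket probe bounds are kept in the narrowest integer type for which
 * the platform provides atomic load and store primitives, so that readers
 * can observe bound updates without locking.
 */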
#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
#define CK_RHS_WORD          uint8_t
#define CK_RHS_WORD_MAX	    UINT8_MAX
#define CK_RHS_STORE(x, y)   ck_pr_store_8(x, y)
#define CK_RHS_LOAD(x)       ck_pr_load_8(x)
#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
#define CK_RHS_WORD          uint16_t
#define CK_RHS_WORD_MAX	    UINT16_MAX
#define CK_RHS_STORE(x, y)   ck_pr_store_16(x, y)
#define CK_RHS_LOAD(x)       ck_pr_load_16(x)
#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
#define CK_RHS_WORD          uint32_t
#define CK_RHS_WORD_MAX	    UINT32_MAX
#define CK_RHS_STORE(x, y)   ck_pr_store_32(x, y)
#define CK_RHS_LOAD(x)       ck_pr_load_32(x)
#else
#error "ck_rhs is not supported on your platform."
#endif

#define CK_RHS_MAX_WANTED	0xffff

enum ck_rhs_probe_behavior {
	CK_RHS_PROBE = 0,	/* Default behavior. */
	CK_RHS_PROBE_RH,	/* Short-circuit if RH slot found. */
	CK_RHS_PROBE_INSERT,	/* Short-circuit on probe bound if tombstone found. */

	CK_RHS_PROBE_ROBIN_HOOD,/* Look for the first slot available for the entry
				 * we are about to replace; only used internally
				 * to implement Robin Hood hashing. */
	CK_RHS_PROBE_NO_RH,	/* Don't do the RH dance. */
};
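
/*
 * Per-slot metadata: probes counts the slot's distance (in probes) from its
 * home bucket, wanted counts displaced entries whose probe sequences pass
 * through this slot, probe_bound caches the per-bucket maximum probe count,
 * and in_rh flags a slot currently involved in a Robin Hood reshuffle.
 */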
struct ck_rhs_entry_desc {
	unsigned int probes;
	unsigned short wanted;
	CK_RHS_WORD probe_bound;
	bool in_rh;
	const void *entry;
} CK_CC_ALIGN(16);

struct ck_rhs_no_entry_desc {
	unsigned int probes;
	unsigned short wanted;
	CK_RHS_WORD probe_bound;
	bool in_rh;
} CK_CC_ALIGN(8);

typedef long ck_rhs_probe_cb_t(struct ck_rhs *hs,
    struct ck_rhs_map *map,
    unsigned long *n_probes,
    long *priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_rhs_probe_behavior behavior);

struct ck_rhs_map {
	unsigned int generation[CK_RHS_G];
	unsigned int probe_maximum;
	unsigned long mask;
	unsigned long step;
	unsigned int probe_limit;
	unsigned long n_entries;
	unsigned long capacity;
	unsigned long size;
	unsigned long max_entries;
	char offset_mask;
	union {
		struct ck_rhs_entry_desc *descs;
		struct ck_rhs_no_entry {
			const void **entries;
			struct ck_rhs_no_entry_desc *descs;
		} no_entries;
	} entries;
	bool read_mostly;
	ck_rhs_probe_cb_t *probe_func;
};
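
/*
 * The accessors below hide the two map layouts: in read-mostly mode the
 * entry pointers live in their own densely packed array, apart from the
 * slot descriptors; otherwise each slot's pointer is embedded in its
 * descriptor.
 */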

static CK_CC_INLINE const void *
ck_rhs_entry(struct ck_rhs_map *map, long offset)
{

	if (map->read_mostly)
		return (map->entries.no_entries.entries[offset]);
	else
		return (map->entries.descs[offset].entry);
}

static CK_CC_INLINE const void **
ck_rhs_entry_addr(struct ck_rhs_map *map, long offset)
{

	if (map->read_mostly)
		return (&map->entries.no_entries.entries[offset]);
	else
		return (&map->entries.descs[offset].entry);
}

static CK_CC_INLINE struct ck_rhs_entry_desc *
ck_rhs_desc(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		return ((struct ck_rhs_entry_desc *)(void *)&map->entries.no_entries.descs[offset]);
	else
		return (&map->entries.descs[offset]);
}

static CK_CC_INLINE void
ck_rhs_wanted_inc(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		map->entries.no_entries.descs[offset].wanted++;
	else
		map->entries.descs[offset].wanted++;
}

static CK_CC_INLINE unsigned int
ck_rhs_probes(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		return (map->entries.no_entries.descs[offset].probes);
	else
		return (map->entries.descs[offset].probes);
}

static CK_CC_INLINE void
ck_rhs_set_probes(struct ck_rhs_map *map, long offset, unsigned int value)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		map->entries.no_entries.descs[offset].probes = value;
	else
		map->entries.descs[offset].probes = value;
}

static CK_CC_INLINE CK_RHS_WORD
ck_rhs_probe_bound(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		return (map->entries.no_entries.descs[offset].probe_bound);
	else
		return (map->entries.descs[offset].probe_bound);
}

static CK_CC_INLINE CK_RHS_WORD *
ck_rhs_probe_bound_addr(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		return (&map->entries.no_entries.descs[offset].probe_bound);
	else
		return (&map->entries.descs[offset].probe_bound);
}

static CK_CC_INLINE bool
ck_rhs_in_rh(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		return (map->entries.no_entries.descs[offset].in_rh);
	else
		return (map->entries.descs[offset].in_rh);
}

static CK_CC_INLINE void
ck_rhs_set_rh(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		map->entries.no_entries.descs[offset].in_rh = true;
	else
		map->entries.descs[offset].in_rh = true;
}

static CK_CC_INLINE void
ck_rhs_unset_rh(struct ck_rhs_map *map, long offset)
{

	if (CK_CC_UNLIKELY(map->read_mostly))
		map->entries.no_entries.descs[offset].in_rh = false;
	else
		map->entries.descs[offset].in_rh = false;
}

#define CK_RHS_DEFAULT_LOAD_FACTOR	50

static ck_rhs_probe_cb_t ck_rhs_map_probe;
static ck_rhs_probe_cb_t ck_rhs_map_probe_rm;

bool
ck_rhs_set_load_factor(struct ck_rhs *hs, unsigned int load_factor)
{
	struct ck_rhs_map *map = hs->map;

	if (load_factor == 0 || load_factor > 100)
		return false;

	hs->load_factor = load_factor;
	map->max_entries = (map->capacity * (unsigned long)hs->load_factor) / 100;
	while (map->n_entries > map->max_entries) {
		if (ck_rhs_grow(hs, map->capacity << 1) == false)
			return false;
		map = hs->map;
	}
	return true;
}

void
ck_rhs_iterator_init(struct ck_rhs_iterator *iterator)
{

	iterator->cursor = NULL;
	iterator->offset = 0;
	return;
}

bool
ck_rhs_next(struct ck_rhs *hs, struct ck_rhs_iterator *i, void **key)
{
	struct ck_rhs_map *map = hs->map;
	void *value;

	if (i->offset >= map->capacity)
		return false;

	do {
		value = CK_CC_DECONST_PTR(ck_rhs_entry(map, i->offset));
		if (value != CK_RHS_EMPTY) {
#ifdef CK_RHS_PP
			if (hs->mode & CK_RHS_MODE_OBJECT)
				value = CK_RHS_VMA(value);
#endif
			i->offset++;
			*key = value;
			return true;
		}
	} while (++i->offset < map->capacity);

	return false;
}

void
ck_rhs_stat(struct ck_rhs *hs, struct ck_rhs_stat *st)
{
	struct ck_rhs_map *map = hs->map;

	st->n_entries = map->n_entries;
	st->probe_maximum = map->probe_maximum;
	return;
}

unsigned long
ck_rhs_count(struct ck_rhs *hs)
{

	return hs->map->n_entries;
}

static void
ck_rhs_map_destroy(struct ck_malloc *m, struct ck_rhs_map *map, bool defer)
{

	m->free(map, map->size, defer);
	return;
}

void
ck_rhs_destroy(struct ck_rhs *hs)
{

	ck_rhs_map_destroy(hs->m, hs->map, false);
	return;
}

static struct ck_rhs_map *
ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries)
{
	struct ck_rhs_map *map;
	unsigned long size, n_entries, limit;

	n_entries = ck_internal_power_2(entries);
	if (n_entries < CK_RHS_PROBE_L1)
		n_entries = CK_RHS_PROBE_L1;

	if (hs->mode & CK_RHS_MODE_READ_MOSTLY)
		size = sizeof(struct ck_rhs_map) +
		    (sizeof(void *) * n_entries +
		     sizeof(struct ck_rhs_no_entry_desc) * n_entries +
		     2 * CK_MD_CACHELINE - 1);
	else
		size = sizeof(struct ck_rhs_map) +
		    (sizeof(struct ck_rhs_entry_desc) * n_entries +
		     CK_MD_CACHELINE - 1);
	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;
	map->read_mostly = !!(hs->mode & CK_RHS_MODE_READ_MOSTLY);

	map->size = size;
	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_RHS_PROBE_L1_SHIFT + 2), CK_RHS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_cc_ffsl(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	map->max_entries = (map->capacity * (unsigned long)hs->load_factor) / 100;
	/* Align map allocation to cache line. */
	if (map->read_mostly) {
		map->entries.no_entries.entries = (void *)(((uintptr_t)&map[1] +
		    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
		map->entries.no_entries.descs = (void *)(((uintptr_t)map->entries.no_entries.entries +
		    (sizeof(void *) * n_entries) + CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
		memset(map->entries.no_entries.entries, 0,
		    sizeof(void *) * n_entries);
		memset(map->entries.no_entries.descs, 0,
		    sizeof(struct ck_rhs_no_entry_desc) * n_entries);
		map->offset_mask = (CK_MD_CACHELINE / sizeof(void *)) - 1;
		map->probe_func = ck_rhs_map_probe_rm;
	} else {
		map->entries.descs = (void *)(((uintptr_t)&map[1] +
		    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
		memset(map->entries.descs, 0, sizeof(struct ck_rhs_entry_desc) * n_entries);
		map->offset_mask = (CK_MD_CACHELINE / sizeof(struct ck_rhs_entry_desc)) - 1;
		map->probe_func = ck_rhs_map_probe;
	}
	memset(map->generation, 0, sizeof map->generation);

	/* Commit entries purge with respect to map publication. */
	ck_pr_fence_store();
	return map;
}

bool
ck_rhs_reset_size(struct ck_rhs *hs, unsigned long capacity)
{
	struct ck_rhs_map *map, *previous;

	previous = hs->map;
	map = ck_rhs_map_create(hs, capacity);
	if (map == NULL)
		return false;

	ck_pr_store_ptr(&hs->map, map);
	ck_rhs_map_destroy(hs->m, previous, true);
	return true;
}

bool
ck_rhs_reset(struct ck_rhs *hs)
{
	struct ck_rhs_map *previous;

	previous = hs->map;
	return ck_rhs_reset_size(hs, previous->capacity);
}

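/*
 * Probing is cache-line aware: successive probes first walk the remaining
 * slots of the current cache line (wrapping within it), and only then jump
 * ahead by the current probe count, spreading later probes across lines.
 */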
static inline unsigned long
ck_rhs_map_probe_next(struct ck_rhs_map *map,
    unsigned long offset,
    unsigned long probes)
{

	if (probes & map->offset_mask) {
		offset = (offset &~ map->offset_mask) +
		    ((offset + 1) & map->offset_mask);
		return offset;
	} else
		return (offset + probes) & map->mask;
}

static inline unsigned long
ck_rhs_map_probe_prev(struct ck_rhs_map *map, unsigned long offset,
    unsigned long probes)
{

	if (probes & map->offset_mask) {
		offset = (offset &~ map->offset_mask) + ((offset - 1) &
		    map->offset_mask);
		return offset;
	} else
		return ((offset - probes) & map->mask);
}

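/*
 * Track the longest probe sequence seen for a home bucket, both globally
 * (probe_maximum) and, outside of read-mostly mode, per bucket
 * (probe_bound), so that lookups can stop probing early.
 */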
static inline void
ck_rhs_map_bound_set(struct ck_rhs_map *m,
    unsigned long h,
    unsigned long n_probes)
{
	unsigned long offset = h & m->mask;
	struct ck_rhs_entry_desc *desc;

	if (n_probes > m->probe_maximum)
		ck_pr_store_uint(&m->probe_maximum, n_probes);
	if (!(m->read_mostly)) {
		desc = &m->entries.descs[offset];

		if (desc->probe_bound < n_probes) {
			if (n_probes > CK_RHS_WORD_MAX)
				n_probes = CK_RHS_WORD_MAX;

			CK_RHS_STORE(&desc->probe_bound, n_probes);
			ck_pr_fence_store();
		}
	}

	return;
}

static inline unsigned int
ck_rhs_map_bound_get(struct ck_rhs_map *m, unsigned long h)
{
	unsigned long offset = h & m->mask;
	unsigned int r = CK_RHS_WORD_MAX;

	if (m->read_mostly)
		r = ck_pr_load_uint(&m->probe_maximum);
	else {
		r = CK_RHS_LOAD(&m->entries.descs[offset].probe_bound);
		if (r == CK_RHS_WORD_MAX)
			r = ck_pr_load_uint(&m->probe_maximum);
	}
	return r;
}

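/*
 * Grow rehashes every entry into a fresh map of at least the requested
 * capacity, applying the Robin Hood displacement rule as it goes; if the
 * probe limit is hit during rehash, the target capacity is doubled and the
 * whole attempt restarts.
 */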
bool
ck_rhs_grow(struct ck_rhs *hs,
    unsigned long capacity)
{
	struct ck_rhs_map *map, *update;
	const void *previous, *prev_saved;
	unsigned long k, offset, probes;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_rhs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		prev_saved = previous = ck_rhs_entry(map, k);
		if (previous == CK_RHS_EMPTY)
			continue;

#ifdef CK_RHS_PP
		if (hs->mode & CK_RHS_MODE_OBJECT)
			previous = CK_RHS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		probes = 0;

		for (;;) {
			const void **cursor = ck_rhs_entry_addr(update, offset);

			if (probes++ == update->probe_limit) {
				/*
				 * We have hit the probe limit, map needs to be even larger.
				 */
				ck_rhs_map_destroy(hs->m, update, false);
				capacity <<= 1;
				goto restart;
			}

			if (CK_CC_LIKELY(*cursor == CK_RHS_EMPTY)) {
				*cursor = prev_saved;
				update->n_entries++;
				ck_rhs_set_probes(update, offset, probes);
				ck_rhs_map_bound_set(update, h, probes);
				break;
			} else if (ck_rhs_probes(update, offset) < probes) {
				const void *tmp = prev_saved;
				unsigned int old_probes;

				prev_saved = previous = *cursor;
#ifdef CK_RHS_PP
				if (hs->mode & CK_RHS_MODE_OBJECT)
					previous = CK_RHS_VMA(previous);
#endif
				*cursor = tmp;
				ck_rhs_map_bound_set(update, h, probes);
				h = hs->hf(previous, hs->seed);
				old_probes = ck_rhs_probes(update, offset);
				ck_rhs_set_probes(update, offset, probes);
				probes = old_probes - 1;
				continue;
			}
			ck_rhs_wanted_inc(update, offset);
			offset = ck_rhs_map_probe_next(update, offset, probes);
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_rhs_map_destroy(hs->m, map, true);
	return true;
}

bool
ck_rhs_rebuild(struct ck_rhs *hs)
{

	return ck_rhs_grow(hs, hs->map->capacity);
}

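/*
 * The two probe implementations below share one contract: walk the probe
 * sequence for hash h and return the matching or empty slot (or -1 on
 * overflow), store the probe count in *n_probes and, in *priority, an
 * earlier "stealable" slot whose resident sits closer to its home bucket
 * than the probed key would. ck_rhs_map_probe_rm is the layout variant
 * used in read-mostly mode.
 */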
static long
ck_rhs_map_probe_rm(struct ck_rhs *hs,
    struct ck_rhs_map *map,
    unsigned long *n_probes,
    long *priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_rhs_probe_behavior behavior)
{
	const void *k;
	const void *compare;
	long pr = -1;
	unsigned long offset, probes, opl;

#ifdef CK_RHS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	unsigned long hv = 0;

	if (hs->mode & CK_RHS_MODE_OBJECT) {
		hv = (h >> 25) & CK_RHS_KEY_MASK;
		compare = CK_RHS_VMA(key);
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	*object = NULL;
	if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
		probes = 0;
		offset = h & map->mask;
	} else {
		/* Restart from the bucket we were previously in. */
		probes = *n_probes;
		offset = ck_rhs_map_probe_next(map, *priority,
		    probes);
	}
	opl = probe_limit;

	for (;;) {
		if (probes++ == probe_limit) {
			if (probe_limit == opl || pr != -1) {
				k = CK_RHS_EMPTY;
				goto leave;
			}
			/*
			 * If no eligible slot has been found yet, continue probe
			 * sequence with original probe limit.
			 */
			probe_limit = opl;
		}
		k = ck_pr_load_ptr(&map->entries.no_entries.entries[offset]);
		if (k == CK_RHS_EMPTY)
			goto leave;

		if (behavior != CK_RHS_PROBE_NO_RH) {
			struct ck_rhs_entry_desc *desc = (void *)&map->entries.no_entries.descs[offset];

			if (pr == -1 &&
			    desc->in_rh == false && desc->probes < probes) {
				pr = offset;
				*n_probes = probes;

				if (behavior == CK_RHS_PROBE_RH ||
				    behavior == CK_RHS_PROBE_ROBIN_HOOD) {
					k = CK_RHS_EMPTY;
					goto leave;
				}
			}
		}

		if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
#ifdef CK_RHS_PP
			if (hs->mode & CK_RHS_MODE_OBJECT) {
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) {
					offset = ck_rhs_map_probe_next(map, offset, probes);
					continue;
				}

				k = CK_RHS_VMA(k);
			}
#endif

			if (k == compare)
				goto leave;

			if (hs->compare == NULL) {
				offset = ck_rhs_map_probe_next(map, offset, probes);
				continue;
			}

			if (hs->compare(k, key) == true)
				goto leave;
		}
		offset = ck_rhs_map_probe_next(map, offset, probes);
	}
leave:
	if (probes > probe_limit) {
		offset = -1;
	} else {
		*object = k;
	}

	if (pr == -1)
		*n_probes = probes;

	*priority = pr;
	return offset;
}

static long
ck_rhs_map_probe(struct ck_rhs *hs,
    struct ck_rhs_map *map,
    unsigned long *n_probes,
    long *priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_rhs_probe_behavior behavior)
{
	const void *k;
	const void *compare;
	long pr = -1;
	unsigned long offset, probes, opl;

#ifdef CK_RHS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	unsigned long hv = 0;

	if (hs->mode & CK_RHS_MODE_OBJECT) {
		hv = (h >> 25) & CK_RHS_KEY_MASK;
		compare = CK_RHS_VMA(key);
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	*object = NULL;
	if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
		probes = 0;
		offset = h & map->mask;
	} else {
		/* Restart from the bucket we were previously in. */
		probes = *n_probes;
		offset = ck_rhs_map_probe_next(map, *priority,
		    probes);
	}
	opl = probe_limit;
	if (behavior == CK_RHS_PROBE_INSERT)
		probe_limit = ck_rhs_map_bound_get(map, h);

	for (;;) {
		if (probes++ == probe_limit) {
			if (probe_limit == opl || pr != -1) {
				k = CK_RHS_EMPTY;
				goto leave;
			}
			/*
			 * If no eligible slot has been found yet, continue probe
			 * sequence with original probe limit.
			 */
			probe_limit = opl;
		}
		k = ck_pr_load_ptr(&map->entries.descs[offset].entry);
		if (k == CK_RHS_EMPTY)
			goto leave;
		if (behavior != CK_RHS_PROBE_NO_RH) {
			struct ck_rhs_entry_desc *desc = &map->entries.descs[offset];

			if (pr == -1 &&
			    desc->in_rh == false && desc->probes < probes) {
				pr = offset;
				*n_probes = probes;

				if (behavior == CK_RHS_PROBE_RH ||
				    behavior == CK_RHS_PROBE_ROBIN_HOOD) {
					k = CK_RHS_EMPTY;
					goto leave;
				}
			}
		}

		if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
#ifdef CK_RHS_PP
			if (hs->mode & CK_RHS_MODE_OBJECT) {
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) {
					offset = ck_rhs_map_probe_next(map, offset, probes);
					continue;
				}

				k = CK_RHS_VMA(k);
			}
#endif

			if (k == compare)
				goto leave;

			if (hs->compare == NULL) {
				offset = ck_rhs_map_probe_next(map, offset, probes);
				continue;
			}

			if (hs->compare(k, key) == true)
				goto leave;
		}
		offset = ck_rhs_map_probe_next(map, offset, probes);
	}
leave:
	if (probes > probe_limit) {
		offset = -1;
	} else {
		*object = k;
	}

	if (pr == -1)
		*n_probes = probes;

	*priority = pr;
	return offset;
}

static inline const void *
ck_rhs_marshal(unsigned int mode, const void *key, unsigned long h)
{
#ifdef CK_RHS_PP
	const void *insert;

	if (mode & CK_RHS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)CK_RHS_VMA(key) |
		    ((h >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = key;
	}

	return insert;
#else
	(void)mode;
	(void)h;

	return key;
#endif
}

bool
ck_rhs_gc(struct ck_rhs *hs)
{
	unsigned long i;
	struct ck_rhs_map *map = hs->map;
	unsigned int max_probes = 0;

	for (i = 0; i < map->capacity; i++) {
		if (ck_rhs_probes(map, i) > max_probes)
			max_probes = ck_rhs_probes(map, i);
	}
	map->probe_maximum = max_probes;
	return true;
}

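/*
 * Maintain the "wanted" counters along a probe sequence: each slot between
 * an entry's home bucket and its resting place records that a displaced
 * entry passes through it, which lets deletion know when backward shifting
 * is still required.
 */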
static void
ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot,
	unsigned long h)
{
	struct ck_rhs_map *map = hs->map;
	long offset;
	unsigned int probes = 1;
	bool found_slot = false;
	struct ck_rhs_entry_desc *desc;

	offset = h & map->mask;

	if (old_slot == -1)
		found_slot = true;
	while (offset != end_offset) {
		if (offset == old_slot)
			found_slot = true;
		if (found_slot) {
			desc = ck_rhs_desc(map, offset);
			if (desc->wanted < CK_RHS_MAX_WANTED)
				desc->wanted++;
		}
		offset = ck_rhs_map_probe_next(map, offset, probes);
		probes++;
	}
}

static unsigned long
ck_rhs_remove_wanted(struct ck_rhs *hs, long offset, long limit)
{
	struct ck_rhs_map *map = hs->map;
	int probes = ck_rhs_probes(map, offset);
	bool do_remove = true;
	struct ck_rhs_entry_desc *desc;

	while (probes > 1) {
		probes--;
		offset = ck_rhs_map_probe_prev(map, offset, probes);
		if (offset == limit)
			do_remove = false;
		if (do_remove) {
			desc = ck_rhs_desc(map, offset);
			if (desc->wanted != CK_RHS_MAX_WANTED)
				desc->wanted--;
		}
	}
	return offset;
}

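/*
 * Walk the probe sequence backwards from a slot and its probe count to
 * recover the home bucket (the first offset of the sequence), undoing the
 * cache-line-aware stepping performed by ck_rhs_map_probe_next().
 */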
static long
ck_rhs_get_first_offset(struct ck_rhs_map *map, unsigned long offset, unsigned int probes)
{

	while (probes > (unsigned long)map->offset_mask + 1) {
		offset -= ((probes - 1) &~ map->offset_mask);
		offset &= map->mask;
		offset = (offset &~ map->offset_mask) +
		    ((offset - map->offset_mask) & map->offset_mask);
		probes -= map->offset_mask + 1;
	}
	return ((offset &~ map->offset_mask) + ((offset - (probes - 1)) & map->offset_mask));
}

#define CK_RHS_MAX_RH	512

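/*
 * Displace an entry Robin Hood style: starting from orig_slot, repeatedly
 * move the resident entry toward an emptier position further along its own
 * probe sequence, recording each displaced slot in prevs[] (bounded by
 * CK_RHS_MAX_RH) so the chain can be committed, or rolled back on failure.
 * Returns 0 on success, 1 if the caller must restart on a grown map and -1
 * on failure.
 */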
static int
ck_rhs_put_robin_hood(struct ck_rhs *hs,
    long orig_slot, struct ck_rhs_entry_desc *desc)
{
	long slot, first;
	const void *object, *insert;
	unsigned long n_probes;
	struct ck_rhs_map *map;
	unsigned long h = 0;
	long prev;
	void *key;
	long prevs[CK_RHS_MAX_RH];
	unsigned int prevs_nb = 0;
	unsigned int i;

	map = hs->map;
	first = orig_slot;
	n_probes = desc->probes;
restart:
	key = CK_CC_DECONST_PTR(ck_rhs_entry(map, first));
	insert = key;
#ifdef CK_RHS_PP
	if (hs->mode & CK_RHS_MODE_OBJECT)
		key = CK_RHS_VMA(key);
#endif
	orig_slot = first;
	ck_rhs_set_rh(map, orig_slot);

	slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, prevs_nb == CK_RHS_MAX_RH ?
	    CK_RHS_PROBE_NO_RH : CK_RHS_PROBE_ROBIN_HOOD);

	if (slot == -1 && first == -1) {
		if (ck_rhs_grow(hs, map->capacity << 1) == false) {
			desc->in_rh = false;

			for (i = 0; i < prevs_nb; i++)
				ck_rhs_unset_rh(map, prevs[i]);

			return -1;
		}

		return 1;
	}

	if (first != -1) {
		desc = ck_rhs_desc(map, first);
		int old_probes = desc->probes;

		desc->probes = n_probes;
		h = ck_rhs_get_first_offset(map, first, n_probes);
		ck_rhs_map_bound_set(map, h, n_probes);
		prev = orig_slot;
		prevs[prevs_nb++] = prev;
		n_probes = old_probes;
		goto restart;
	} else {
		/* An empty slot was found. */
		h = ck_rhs_get_first_offset(map, slot, n_probes);
		ck_rhs_map_bound_set(map, h, n_probes);
		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
		ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
		ck_pr_fence_atomic_store();
		ck_rhs_set_probes(map, slot, n_probes);
		desc->in_rh = false;
		ck_rhs_add_wanted(hs, slot, orig_slot, h);
	}
	while (prevs_nb > 0) {
		prev = prevs[--prevs_nb];
		ck_pr_store_ptr(ck_rhs_entry_addr(map, orig_slot),
		    ck_rhs_entry(map, prev));
		h = ck_rhs_get_first_offset(map, orig_slot,
		    desc->probes);
		ck_rhs_add_wanted(hs, orig_slot, prev, h);
		ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
		ck_pr_fence_atomic_store();
		orig_slot = prev;
		desc->in_rh = false;
		desc = ck_rhs_desc(map, orig_slot);
	}
	return 0;
}

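/*
 * Delete by backward shifting rather than tombstoning: while slots behind
 * the vacated one are still "wanted", pull the nearest successor entry back
 * into the hole, fix up probe counts and probe bounds, then repeat from the
 * successor's old slot until the tail of the chain can simply be emptied.
 */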
static void
ck_rhs_do_backward_shift_delete(struct ck_rhs *hs, long slot)
{
	struct ck_rhs_map *map = hs->map;
	struct ck_rhs_entry_desc *desc, *new_desc = NULL;
	unsigned long h;

	desc = ck_rhs_desc(map, slot);
	h = ck_rhs_remove_wanted(hs, slot, -1);
	while (desc->wanted > 0) {
		unsigned long offset = 0, tmp_offset;
		unsigned long wanted_probes = 1;
		unsigned int probe = 0;
		unsigned int max_probes;

		/* Find a successor. */
		while (wanted_probes < map->probe_maximum) {
			probe = wanted_probes;
			offset = ck_rhs_map_probe_next(map, slot, probe);
			while (probe < map->probe_maximum) {
				new_desc = ck_rhs_desc(map, offset);
				if (new_desc->probes == probe + 1)
					break;
				probe++;
				offset = ck_rhs_map_probe_next(map, offset,
				    probe);
			}
			if (probe < map->probe_maximum)
				break;
			wanted_probes++;
		}
		if (!(wanted_probes < map->probe_maximum)) {
			desc->wanted = 0;
			break;
		}
		desc->probes = wanted_probes;
		h = ck_rhs_remove_wanted(hs, offset, slot);
		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot),
		    ck_rhs_entry(map, offset));
		ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
		ck_pr_fence_atomic_store();
		if (wanted_probes < CK_RHS_WORD_MAX) {
			struct ck_rhs_entry_desc *hdesc = ck_rhs_desc(map, h);

			if (hdesc->wanted == 1)
				CK_RHS_STORE(&hdesc->probe_bound,
				    wanted_probes);
			else if (hdesc->probe_bound == CK_RHS_WORD_MAX ||
			    hdesc->probe_bound == new_desc->probes) {
				probe++;
				if (hdesc->probe_bound == CK_RHS_WORD_MAX)
					max_probes = map->probe_maximum;
				else {
					max_probes = hdesc->probe_bound;
					max_probes--;
				}
				tmp_offset = ck_rhs_map_probe_next(map, offset,
				    probe);
				while (probe < max_probes) {
					if (h == (unsigned long)ck_rhs_get_first_offset(map, tmp_offset, probe))
						break;
					probe++;
					tmp_offset = ck_rhs_map_probe_next(map, tmp_offset, probe);
				}
				if (probe == max_probes)
					CK_RHS_STORE(&hdesc->probe_bound,
					    wanted_probes);
			}
		}
		if (desc->wanted < CK_RHS_MAX_WANTED)
			desc->wanted--;
		slot = offset;
		desc = new_desc;
	}
	ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), CK_RHS_EMPTY);
	if ((desc->probes - 1) < CK_RHS_WORD_MAX)
		CK_RHS_STORE(ck_rhs_probe_bound_addr(map, h),
		    desc->probes - 1);
	desc->probes = 0;
}

bool
ck_rhs_fas(struct ck_rhs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	long slot, first;
	const void *object;
	const void *insert;
	unsigned long n_probes;
	struct ck_rhs_map *map = hs->map;
	struct ck_rhs_entry_desc *desc, *desc2;

	*previous = NULL;
restart:
	slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
	    ck_rhs_map_bound_get(map, h), CK_RHS_PROBE);

	/* Replacement semantics presume existence. */
	if (object == NULL)
		return false;

	insert = ck_rhs_marshal(hs->mode, key, h);

	if (first != -1) {
		int ret;

		desc = ck_rhs_desc(map, slot);
		desc2 = ck_rhs_desc(map, first);
		desc->in_rh = true;
		ret = ck_rhs_put_robin_hood(hs, first, desc2);
		desc->in_rh = false;
		if (CK_CC_UNLIKELY(ret == 1))
			goto restart;
		else if (CK_CC_UNLIKELY(ret != 0))
			return false;
		ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
		ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
		ck_pr_fence_atomic_store();
		desc2->probes = n_probes;
		ck_rhs_add_wanted(hs, first, -1, h);
		ck_rhs_do_backward_shift_delete(hs, slot);
	} else {
		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
		ck_rhs_set_probes(map, slot, n_probes);
	}
	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

/*
 * An apply function takes two arguments. The first argument is a pointer to a
 * pre-existing object. The second argument is a pointer to the fifth argument
 * passed to ck_rhs_apply. If a non-NULL pointer is passed to the first argument
 * and the return value of the apply function is NULL, then the pre-existing
 * value is deleted. If the return pointer is the same as the one passed to the
 * apply function then no changes are made to the hash table.  If the first
 * argument is non-NULL and the return pointer is different than that passed to
 * the apply function, then the pre-existing value is replaced. For
 * replacement, it is required that the value itself is identical to the
 * previous value.
 */
bool
ck_rhs_apply(struct ck_rhs *hs,
    unsigned long h,
    const void *key,
    ck_rhs_apply_fn_t *fn,
    void *cl)
{
	const void *insert;
	const void *object, *delta = NULL;
	unsigned long n_probes;
	long slot, first;
	struct ck_rhs_map *map;
	bool delta_set = false;

restart:
	map = hs->map;

	slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, CK_RHS_PROBE_INSERT);
	if (slot == -1 && first == -1) {
		if (ck_rhs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}
	if (!delta_set) {
		delta = fn(CK_CC_DECONST_PTR(object), cl);
		delta_set = true;
	}

	if (delta == NULL) {
		/*
		 * The apply function has requested deletion. If the object doesn't exist,
		 * then exit early.
		 */
		if (CK_CC_UNLIKELY(object == NULL))
			return true;

		/* Otherwise, delete it. */
		ck_rhs_do_backward_shift_delete(hs, slot);
		return true;
	}

	/* The apply function has not requested hash set modification so exit early. */
	if (delta == object)
		return true;

	/* A modification or insertion has been requested. */
	ck_rhs_map_bound_set(map, h, n_probes);

	insert = ck_rhs_marshal(hs->mode, delta, h);
	if (first != -1) {
		/*
		 * This follows the same semantics as ck_rhs_set, please refer to that
		 * function for documentation.
		 */
		struct ck_rhs_entry_desc *desc = NULL, *desc2;
		int ret;

		if (slot != -1) {
			desc = ck_rhs_desc(map, slot);
			desc->in_rh = true;
		}
		desc2 = ck_rhs_desc(map, first);
		ret = ck_rhs_put_robin_hood(hs, first, desc2);
		if (slot != -1)
			desc->in_rh = false;

		if (CK_CC_UNLIKELY(ret == 1))
			goto restart;
		if (CK_CC_UNLIKELY(ret == -1))
			return false;
		/* If an earlier bucket was found, then store entry there. */
		ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
		desc2->probes = n_probes;
		/*
		 * If a duplicate key was found, then delete it after
		 * signaling concurrent probes to restart. Optionally,
		 * it is possible to install a tombstone after a grace
		 * period if we can guarantee an earlier position of the
		 * duplicate key.
		 */
		ck_rhs_add_wanted(hs, first, -1, h);
		if (object != NULL) {
			ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
			ck_pr_fence_atomic_store();
			ck_rhs_do_backward_shift_delete(hs, slot);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
		ck_rhs_set_probes(map, slot, n_probes);
		if (object == NULL)
			ck_rhs_add_wanted(hs, slot, -1, h);
	}

	if (object == NULL) {
		map->n_entries++;
		if (map->n_entries > map->max_entries)
			ck_rhs_grow(hs, map->capacity << 1);
	}
	return true;
}
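
/*
 * Illustrative sketch (not part of the library): an apply function that
 * implements upsert semantics. The update_object() helper and the way an
 * identical replacement value is obtained are hypothetical and depend on
 * the application.
 *
 *	static void *
 *	upsert_cb(void *existing, void *arg)
 *	{
 *
 *		if (existing == NULL)
 *			return arg;		(insert the new object)
 *
 *		update_object(existing, arg);	(mutate in place...)
 *		return existing;		(...and leave the set unchanged)
 *	}
 *
 *	ck_rhs_apply(&hs, h, key, upsert_cb, new_object);
 */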

bool
ck_rhs_set(struct ck_rhs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	long slot, first;
	const void *object;
	const void *insert;
	unsigned long n_probes;
	struct ck_rhs_map *map;

	*previous = NULL;

restart:
	map = hs->map;

	slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, CK_RHS_PROBE_INSERT);
	if (slot == -1 && first == -1) {
		if (ck_rhs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}
	ck_rhs_map_bound_set(map, h, n_probes);
	insert = ck_rhs_marshal(hs->mode, key, h);

	if (first != -1) {
		struct ck_rhs_entry_desc *desc = NULL, *desc2;
		int ret;

		if (slot != -1) {
			desc = ck_rhs_desc(map, slot);
			desc->in_rh = true;
		}
		desc2 = ck_rhs_desc(map, first);
		ret = ck_rhs_put_robin_hood(hs, first, desc2);
		if (slot != -1)
			desc->in_rh = false;

		if (CK_CC_UNLIKELY(ret == 1))
			goto restart;
		if (CK_CC_UNLIKELY(ret == -1))
			return false;
		/* If an earlier bucket was found, then store entry there. */
		ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
		desc2->probes = n_probes;
		/*
		 * If a duplicate key was found, then delete it after
		 * signaling concurrent probes to restart. Optionally,
		 * it is possible to install a tombstone after a grace
		 * period if we can guarantee an earlier position of the
		 * duplicate key.
		 */
		ck_rhs_add_wanted(hs, first, -1, h);
		if (object != NULL) {
			ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
			ck_pr_fence_atomic_store();
			ck_rhs_do_backward_shift_delete(hs, slot);
		}
	} else {
		/*
		 * If we are storing into the same slot, then an atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
		ck_rhs_set_probes(map, slot, n_probes);
		if (object == NULL)
			ck_rhs_add_wanted(hs, slot, -1, h);
	}

	if (object == NULL) {
		map->n_entries++;
		if (map->n_entries > map->max_entries)
			ck_rhs_grow(hs, map->capacity << 1);
	}

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

static bool
ck_rhs_put_internal(struct ck_rhs *hs,
    unsigned long h,
    const void *key,
    enum ck_rhs_probe_behavior behavior)
{
	long slot, first;
	const void *object;
	const void *insert;
	unsigned long n_probes;
	struct ck_rhs_map *map;

restart:
	map = hs->map;

	slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, behavior);

	if (slot == -1 && first == -1) {
		if (ck_rhs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	/* Fail operation if a match was found. */
	if (object != NULL)
		return false;

	ck_rhs_map_bound_set(map, h, n_probes);
	insert = ck_rhs_marshal(hs->mode, key, h);

	if (first != -1) {
		struct ck_rhs_entry_desc *desc = ck_rhs_desc(map, first);
		int ret = ck_rhs_put_robin_hood(hs, first, desc);

		if (CK_CC_UNLIKELY(ret == 1))
			return ck_rhs_put_internal(hs, h, key, behavior);
		else if (CK_CC_UNLIKELY(ret == -1))
			return false;
		/* Insert key into first bucket in probe sequence. */
		ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
		desc->probes = n_probes;
		ck_rhs_add_wanted(hs, first, -1, h);
	} else {
		/* An empty slot was found. */
		ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
		ck_rhs_set_probes(map, slot, n_probes);
		ck_rhs_add_wanted(hs, slot, -1, h);
	}

	map->n_entries++;
	if (map->n_entries > map->max_entries)
		ck_rhs_grow(hs, map->capacity << 1);
	return true;
}

bool
ck_rhs_put(struct ck_rhs *hs,
    unsigned long h,
    const void *key)
{

	return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_INSERT);
}

bool
ck_rhs_put_unique(struct ck_rhs *hs,
    unsigned long h,
    const void *key)
{

	return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_RH);
}

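/*
 * Lock-free lookup: the generation counter for the key's bucket group is
 * sampled before and after probing, and the probe is retried whenever a
 * concurrent writer bumped it, guarding against observing a key mid-move.
 */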
void *
ck_rhs_get(struct ck_rhs *hs,
    unsigned long h,
    const void *key)
{
	long first;
	const void *object;
	struct ck_rhs_map *map;
	unsigned long n_probes;
	unsigned int g, g_p, probe;
	unsigned int *generation;

	do {
		map = ck_pr_load_ptr(&hs->map);
		generation = &map->generation[h & CK_RHS_G_MASK];
		g = ck_pr_load_uint(generation);
		probe = ck_rhs_map_bound_get(map, h);
		ck_pr_fence_load();

		first = -1;
		map->probe_func(hs, map, &n_probes, &first, h, key, &object, probe, CK_RHS_PROBE_NO_RH);

		ck_pr_fence_load();
		g_p = ck_pr_load_uint(generation);
	} while (g != g_p);

	return CK_CC_DECONST_PTR(object);
}

void *
ck_rhs_remove(struct ck_rhs *hs,
    unsigned long h,
    const void *key)
{
	long slot, first;
	const void *object;
	struct ck_rhs_map *map = hs->map;
	unsigned long n_probes;

	slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
	    ck_rhs_map_bound_get(map, h), CK_RHS_PROBE_NO_RH);
	if (object == NULL)
		return NULL;

	map->n_entries--;
	ck_rhs_do_backward_shift_delete(hs, slot);
	return CK_CC_DECONST_PTR(object);
}

bool
ck_rhs_move(struct ck_rhs *hs,
    struct ck_rhs *source,
    ck_rhs_hash_cb_t *hf,
    ck_rhs_compare_cb_t *compare,
    struct ck_malloc *m)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->mode = source->mode;
	hs->seed = source->seed;
	hs->map = source->map;
	hs->load_factor = source->load_factor;
	hs->m = m;
	hs->hf = hf;
	hs->compare = compare;
	return true;
}

bool
ck_rhs_init(struct ck_rhs *hs,
    unsigned int mode,
    ck_rhs_hash_cb_t *hf,
    ck_rhs_compare_cb_t *compare,
    struct ck_malloc *m,
    unsigned long n_entries,
    unsigned long seed)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->m = m;
	hs->mode = mode;
	hs->seed = seed;
	hs->hf = hf;
	hs->compare = compare;
	hs->load_factor = CK_RHS_DEFAULT_LOAD_FACTOR;

	hs->map = ck_rhs_map_create(hs, n_entries);
	return hs->map != NULL;
}
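
/*
 * Illustrative usage sketch (assumptions: hs_hash() and hs_compare() are
 * application-supplied callbacks and my_allocator is a populated
 * struct ck_malloc; none of these are defined by this file):
 *
 *	struct ck_rhs hs;
 *	const char *key = "example";
 *	unsigned long h;
 *
 *	if (ck_rhs_init(&hs, CK_RHS_MODE_SPMC | CK_RHS_MODE_OBJECT,
 *	    hs_hash, hs_compare, &my_allocator, 64, 6602834) == false)
 *		abort();
 *
 *	h = CK_RHS_HASH(&hs, hs_hash, key);
 *	ck_rhs_put(&hs, h, key);
 *	(readers may call ck_rhs_get(&hs, h, key) concurrently)
 */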