// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

/**
 * DOC:
 *
 * Hash table implementation of a map from integers to pointers, implemented using the Hopscotch
 * Hashing algorithm by Herlihy, Shavit, and Tzafrir (see
 * http://en.wikipedia.org/wiki/Hopscotch_hashing). This implementation does not contain any of the
 * locking/concurrency features of the algorithm, just the collision resolution scheme.
 *
 * Hopscotch Hashing is based on hashing with open addressing and linear probing. All the entries
 * are stored in a fixed array of buckets, with no dynamic allocation for collisions. Unlike linear
 * probing, all the entries that hash to a given bucket are stored within a fixed neighborhood
 * starting at that bucket. Chaining is effectively represented as a bit vector relative to each
 * bucket instead of as pointers or explicit offsets.
 *
 * When an empty bucket cannot be found within a given neighborhood, subsequent neighborhoods are
 * searched, and one or more entries will "hop" into those neighborhoods. When this process works,
 * an empty bucket will move into the desired neighborhood, allowing the entry to be added. When
 * that process fails (typically when the buckets are around 90% full), the table must be resized
 * and all the entries rehashed and added to the expanded table.
 *
 * Unlike linear probing, the number of buckets that must be searched in the worst case has a fixed
 * upper bound (the size of the neighborhood). Those entries occupy a small number of memory cache
 * lines, leading to improved use of the cache (fewer misses on both successful and unsuccessful
 * searches). Hopscotch hashing outperforms linear probing at much higher load factors, so even
 * with the increased memory burden for maintaining the hop vectors, less memory is needed to
 * achieve that performance. Hopscotch is also immune to "contamination" from deleting entries
 * since entries are genuinely removed instead of being replaced by a placeholder.
 *
 * The published description of the algorithm used a bit vector, but the paper alludes to an offset
 * scheme which is used by this implementation. Since the entries in the neighborhood are within N
 * entries of the hash bucket at the start of the neighborhood, a pair of small offset fields each
 * log2(N) bits wide is all that's needed to maintain the hops as a linked list. In order to encode
 * "no next hop" (i.e. NULL) as the natural initial value of zero, the offsets are biased by one
 * (i.e. 0 => NULL, 1 => offset=0, 2 => offset=1, etc.) We can represent neighborhoods of up to 255
 * entries with just 8+8=16 bits per entry. The hop list is sorted by hop offset so the first entry
 * in the list is always the bucket closest to the start of the neighborhood.
 *
 * While individual accesses tend to be very fast, the table resize operations are very, very
 * expensive. If an upper bound on the latency of adding an entry to the table is needed, we either
 * need to ensure the table is pre-sized to be large enough so no resize is ever needed, or we'll
 * need to develop an approach to incrementally resize the table.
 */
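
/*
 * A minimal usage sketch of the API defined below (illustrative only: "context" stands in for
 * whatever non-NULL pointer a caller actually stores, and error handling is abbreviated):
 *
 *	struct int_map *map;
 *	void *old_value;
 *
 *	if (vdo_int_map_create(1000, &map) != VDO_SUCCESS)
 *		return;
 *	vdo_int_map_put(map, 42, context, true, &old_value);
 *	context = vdo_int_map_get(map, 42);
 *	vdo_int_map_remove(map, 42);
 *	vdo_int_map_free(map);
 */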

#include "int-map.h"

#include <linux/minmax.h>

#include "errors.h"
#include "logger.h"
#include "memory-alloc.h"
#include "numeric.h"
#include "permassert.h"

#define DEFAULT_CAPACITY 16 /* the number of neighborhoods in a new table */
#define NEIGHBORHOOD 255    /* the number of buckets in each neighborhood */
#define MAX_PROBES 1024     /* limit on the number of probes for a free bucket */
#define NULL_HOP_OFFSET 0   /* the hop offset value terminating the hop list */
#define DEFAULT_LOAD 75     /* a compromise between memory use and performance */

/**
 * struct bucket - hash bucket
 *
 * Buckets are packed together to reduce memory usage and improve cache efficiency. It would be
 * tempting to encode the hop offsets separately and maintain alignment of key/value pairs, but
 * it's crucial to keep the hop fields near the buckets that use them so they'll tend to share
 * cache lines.
 */
struct __packed bucket {
	/**
	 * @first_hop: The biased offset of the first entry in the hop list of the neighborhood
	 *             that hashes to this bucket.
	 */
	u8 first_hop;
	/** @next_hop: The biased offset of the next bucket in the hop list. */
	u8 next_hop;
	/** @key: The key stored in this bucket. */
	u64 key;
	/** @value: The value stored in this bucket (NULL if empty). */
	void *value;
};

/**
 * struct int_map - The concrete definition of the opaque int_map type.
 *
 * To avoid having to wrap the neighborhoods of the last entries back around to the start of the
 * bucket array, we allocate a few more buckets at the end of the array instead, which is why
 * capacity and bucket_count are different.
 */
struct int_map {
	/** @size: The number of entries stored in the map. */
	size_t size;
	/** @capacity: The number of neighborhoods in the map. */
	size_t capacity;
	/** @bucket_count: The number of buckets in the bucket array. */
	size_t bucket_count;
	/** @buckets: The array of hash buckets. */
	struct bucket *buckets;
};

/**
 * mix() - The Google CityHash 16-byte hash mixing function.
 * @input1: The first input value.
 * @input2: The second input value.
 *
 * Return: A hash of the two inputs.
 */
static u64 mix(u64 input1, u64 input2)
{
	static const u64 CITY_MULTIPLIER = 0x9ddfea08eb382d69ULL;
	u64 hash = (input1 ^ input2);

	hash *= CITY_MULTIPLIER;
	hash ^= (hash >> 47);
	hash ^= input2;
	hash *= CITY_MULTIPLIER;
	hash ^= (hash >> 47);
	hash *= CITY_MULTIPLIER;
	return hash;
}

/**
 * hash_key() - Calculate a 64-bit non-cryptographic hash value for the provided 64-bit integer
 *              key.
 * @key: The mapping key.
 *
 * The implementation is based on Google's CityHash, only handling the specific case of an 8-byte
 * input.
 *
 * Return: The hash of the mapping key.
 */
static u64 hash_key(u64 key)
{
	/*
	 * Aliasing restrictions forbid us from casting pointer types, so use a union to convert a
	 * single u64 to two u32 values.
	 */
	union {
		u64 u64;
		u32 u32[2];
	} pun = {.u64 = key};

	return mix(sizeof(key) + (((u64) pun.u32[0]) << 3), pun.u32[1]);
}

/**
 * allocate_buckets() - Initialize an int_map.
 * @map: The map to initialize.
 * @capacity: The initial capacity of the map.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int allocate_buckets(struct int_map *map, size_t capacity)
{
	map->size = 0;
	map->capacity = capacity;

	/*
	 * Allocate NEIGHBORHOOD - 1 extra buckets so the last bucket can have a full neighborhood
	 * without having to wrap back around to element zero.
	 */
	map->bucket_count = capacity + (NEIGHBORHOOD - 1);
	return vdo_allocate(map->bucket_count, struct bucket,
			    "struct int_map buckets", &map->buckets);
}

/**
 * vdo_int_map_create() - Allocate and initialize an int_map.
 * @initial_capacity: The number of entries the map should initially be capable of holding (zero
 *                    tells the map to use its own small default).
 * @map_ptr: Output, a pointer to hold the new int_map.
 *
 * Return: VDO_SUCCESS or an error code.
 */
int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr)
{
	struct int_map *map;
	int result;
	size_t capacity;

	result = vdo_allocate(1, struct int_map, "struct int_map", &map);
	if (result != VDO_SUCCESS)
		return result;

	/* Use the default capacity if the caller did not specify one. */
	capacity = (initial_capacity > 0) ? initial_capacity : DEFAULT_CAPACITY;

	/*
	 * Scale up the capacity by the default load factor (e.g., to hold 1000 entries at 80%
	 * load, we need a capacity of 1250).
	 */
	capacity = capacity * 100 / DEFAULT_LOAD;

	result = allocate_buckets(map, capacity);
	if (result != VDO_SUCCESS) {
		vdo_int_map_free(vdo_forget(map));
		return result;
	}

	*map_ptr = map;
	return VDO_SUCCESS;
}

/**
 * vdo_int_map_free() - Free an int_map.
 * @map: The int_map to free.
 *
 * NOTE: The map does not own the pointer values stored in the map and they are not freed by this
 * call.
 */
void vdo_int_map_free(struct int_map *map)
{
	if (map == NULL)
		return;

	vdo_free(vdo_forget(map->buckets));
	vdo_free(vdo_forget(map));
}

/**
 * vdo_int_map_size() - Get the number of entries stored in an int_map.
 * @map: The int_map to query.
 *
 * Return: The number of entries in the map.
 */
size_t vdo_int_map_size(const struct int_map *map)
{
	return map->size;
}

/**
 * dereference_hop() - Convert a biased hop offset within a neighborhood to a pointer to the bucket
 *                     it references.
 * @neighborhood: The first bucket in the neighborhood.
 * @hop_offset: The biased hop offset to the desired bucket.
 *
 * Return: NULL if hop_offset is zero, otherwise a pointer to the bucket in the neighborhood at
 *         hop_offset - 1.
 */
static struct bucket *dereference_hop(struct bucket *neighborhood, unsigned int hop_offset)
{
	BUILD_BUG_ON(NULL_HOP_OFFSET != 0);
	if (hop_offset == NULL_HOP_OFFSET)
		return NULL;

	return &neighborhood[hop_offset - 1];
}

/**
 * insert_in_hop_list() - Add a bucket into the hop list for the neighborhood.
 * @neighborhood: The first bucket in the neighborhood.
 * @new_bucket: The bucket to add to the hop list.
 *
 * The bucket is inserted into the list so that the hop list remains sorted by hop offset.
 */
static void insert_in_hop_list(struct bucket *neighborhood, struct bucket *new_bucket)
{
	/* Zero indicates a NULL hop offset, so bias the hop offset by one. */
	int hop_offset = 1 + (new_bucket - neighborhood);

	/* Handle the special case of adding a bucket at the start of the list. */
	int next_hop = neighborhood->first_hop;

	if ((next_hop == NULL_HOP_OFFSET) || (next_hop > hop_offset)) {
		new_bucket->next_hop = next_hop;
		neighborhood->first_hop = hop_offset;
		return;
	}

	/* Search the hop list for the insertion point that maintains the sort order. */
	for (;;) {
		struct bucket *bucket = dereference_hop(neighborhood, next_hop);

		next_hop = bucket->next_hop;

		if ((next_hop == NULL_HOP_OFFSET) || (next_hop > hop_offset)) {
			new_bucket->next_hop = next_hop;
			bucket->next_hop = hop_offset;
			return;
		}
	}
}

/**
 * select_bucket() - Select and return the hash bucket for a given search key.
 * @map: The map to search.
 * @key: The mapping key.
 */
static struct bucket *select_bucket(const struct int_map *map, u64 key)
{
	/*
	 * Calculate a good hash value for the provided key. We want exactly 32 bits, so mask the
	 * result.
	 */
	u64 hash = hash_key(key) & 0xFFFFFFFF;

	/*
	 * Scale the 32-bit hash to a bucket index by treating it as a binary fraction and
	 * multiplying that by the capacity. If the hash is uniformly distributed over [0 ..
	 * 2^32-1], then (hash * capacity / 2^32) should be uniformly distributed over [0 ..
	 * capacity-1]. The multiply and shift is much faster than a divide (modulus) on X86 CPUs.
	 */
	return &map->buckets[(hash * map->capacity) >> 32];
}
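
/*
 * A worked example of the scaling in select_bucket() (the numbers are arbitrary, chosen only to
 * illustrate the arithmetic): with a capacity of 1000 neighborhoods and a 32-bit hash of
 * 0x40000000 (one quarter of 2^32), (hash * capacity) >> 32 is 250, one quarter of the way into
 * [0 .. 999], so a uniformly distributed hash yields a uniformly distributed bucket index.
 */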

/**
 * search_hop_list() - Search the hop list associated with a given hash bucket for a given search
 *                     key.
 * @map: The map being searched.
 * @bucket: The map bucket to search for the key.
 * @key: The mapping key.
 * @previous_ptr: Output. If not NULL, a pointer in which to store the bucket in the list preceding
 *                the one that had the matching key.
 *
 * If the key is found, returns a pointer to the entry (bucket or collision), otherwise returns
 * NULL.
 *
 * Return: An entry that matches the key, or NULL if not found.
 */
static struct bucket *search_hop_list(struct int_map *map __always_unused,
				      struct bucket *bucket,
				      u64 key,
				      struct bucket **previous_ptr)
{
	struct bucket *previous = NULL;
	unsigned int next_hop = bucket->first_hop;

	while (next_hop != NULL_HOP_OFFSET) {
		/*
		 * Check the neighboring bucket indexed by the offset for the
		 * desired key.
		 */
		struct bucket *entry = dereference_hop(bucket, next_hop);

		if ((key == entry->key) && (entry->value != NULL)) {
			if (previous_ptr != NULL)
				*previous_ptr = previous;
			return entry;
		}
		next_hop = entry->next_hop;
		previous = entry;
	}

	return NULL;
}

/**
 * vdo_int_map_get() - Retrieve the value associated with a given key from the int_map.
 * @map: The int_map to query.
 * @key: The key to look up.
 *
 * Return: The value associated with the given key, or NULL if the key is not mapped to any value.
 */
void *vdo_int_map_get(struct int_map *map, u64 key)
{
	struct bucket *match = search_hop_list(map, select_bucket(map, key), key, NULL);

	return ((match != NULL) ? match->value : NULL);
}

/**
 * resize_buckets() - Increase the number of hash buckets.
 * @map: The map to resize.
 *
 * Resizes and rehashes all the existing entries, storing them in the new buckets.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int resize_buckets(struct int_map *map)
{
	int result;
	size_t i;

	/* Copy the top-level map data to the stack. */
	struct int_map old_map = *map;

	/* Re-initialize the map to be empty and 50% larger. */
	size_t new_capacity = map->capacity / 2 * 3;

	vdo_log_info("%s: attempting resize from %zu to %zu, current size=%zu",
		     __func__, map->capacity, new_capacity, map->size);
	result = allocate_buckets(map, new_capacity);
	if (result != VDO_SUCCESS) {
		*map = old_map;
		return result;
	}

	/* Populate the new hash table from the entries in the old bucket array. */
	for (i = 0; i < old_map.bucket_count; i++) {
		struct bucket *entry = &old_map.buckets[i];

		if (entry->value == NULL)
			continue;

		result = vdo_int_map_put(map, entry->key, entry->value, true, NULL);
		if (result != VDO_SUCCESS) {
			/* Destroy the new partial map and restore the map from the stack. */
			vdo_free(vdo_forget(map->buckets));
			*map = old_map;
			return result;
		}
	}

	/* Destroy the old bucket array. */
	vdo_free(vdo_forget(old_map.buckets));
	return VDO_SUCCESS;
}

/**
 * find_empty_bucket() - Probe the bucket array starting at the given bucket for the next empty
 *                       bucket, returning a pointer to it.
 * @map: The map containing the buckets to search.
 * @bucket: The bucket at which to start probing.
 * @max_probes: The maximum number of buckets to search.
 *
 * NULL will be returned if the search reaches the end of the bucket array or if the number of
 * linear probes exceeds a specified limit.
 *
 * Return: The next empty bucket, or NULL if the search failed.
 */
static struct bucket *
find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_probes)
{
	/*
	 * Limit the search to the nearer of the end of the bucket array or a fixed distance
	 * beyond the initial bucket.
	 */
	ptrdiff_t remaining = &map->buckets[map->bucket_count] - bucket;
	struct bucket *sentinel = &bucket[min_t(ptrdiff_t, remaining, max_probes)];
	struct bucket *entry;

	for (entry = bucket; entry < sentinel; entry++) {
		if (entry->value == NULL)
			return entry;
	}

	return NULL;
}

/**
 * move_empty_bucket() - Move an empty bucket closer to the start of the bucket array.
 * @map: The map containing the bucket.
 * @hole: The empty bucket to fill with an entry that precedes it in one of its enclosing
 *        neighborhoods.
 *
 * This searches the neighborhoods that contain the empty bucket for a non-empty bucket closer to
 * the start of the array. If such a bucket is found, this swaps the two buckets by moving the
 * entry to the empty bucket.
 *
 * Return: The bucket that was vacated by moving its entry to the provided hole, or NULL if no
 *         entry could be moved.
 */
static struct bucket *move_empty_bucket(struct int_map *map __always_unused,
					struct bucket *hole)
{
	/*
	 * Examine every neighborhood that the empty bucket is part of, starting with the one in
	 * which it is the last bucket. No boundary check is needed for the negative array
	 * arithmetic since this function is only called when hole is at least NEIGHBORHOOD cells
	 * deeper into the array than a valid bucket.
	 */
	struct bucket *bucket;

	for (bucket = &hole[1 - NEIGHBORHOOD]; bucket < hole; bucket++) {
		/*
		 * Find the entry that is nearest to the bucket, which means it will be nearest to
		 * the hash bucket whose neighborhood is full.
		 */
		struct bucket *new_hole = dereference_hop(bucket, bucket->first_hop);

		if (new_hole == NULL) {
			/*
			 * There are no buckets in this neighborhood that are in use by this one
			 * (they must all be owned by overlapping neighborhoods).
			 */
			continue;
		}

		/*
		 * Skip this bucket if its first entry is actually further away than the hole that
		 * we're already trying to fill.
		 */
		if (hole < new_hole)
			continue;

		/*
		 * We've found an entry in this neighborhood that we can "hop" further away, moving
		 * the hole closer to the hash bucket, if not all the way into its neighborhood.
		 */

		/*
		 * The entry that will be the new hole is the first bucket in the list, so setting
		 * first_hop is all that's needed to remove it from the list.
		 */
		bucket->first_hop = new_hole->next_hop;
		new_hole->next_hop = NULL_HOP_OFFSET;

		/* Move the entry into the original hole. */
		hole->key = new_hole->key;
		hole->value = new_hole->value;
		new_hole->value = NULL;

		/* Insert the filled hole into the hop list for the neighborhood. */
		insert_in_hop_list(bucket, hole);
		return new_hole;
	}

	/* We couldn't find an entry to relocate to the hole. */
	return NULL;
}
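
/*
 * To illustrate move_empty_bucket() with small, made-up numbers (the real NEIGHBORHOOD is 255):
 * if the neighborhood size were 4 and the hole were at bucket index 10, the loop would start at
 * index 7, the earliest neighborhood that still contains index 10. If the first hop entry of
 * that neighborhood were at index 8, its key and value would be copied into bucket 10, bucket 10
 * would be spliced into neighborhood 7's hop list, and bucket 8 would become the new hole, two
 * cells closer to the neighborhood that needs the vacancy.
 */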

/**
 * update_mapping() - Find and update any existing mapping for a given key, returning the value
 *                    associated with the key in the provided pointer.
 * @map: The int_map to attempt to modify.
 * @neighborhood: The first bucket in the neighborhood that would contain the search key
 * @key: The key with which to associate the new value.
 * @new_value: The value to be associated with the key.
 * @update: Whether to overwrite an existing value.
 * @old_value_ptr: a pointer in which to store the old value (unmodified if no mapping was found)
 *
 * Return: true if the map contains a mapping for the key, false if it does not.
 */
static bool update_mapping(struct int_map *map, struct bucket *neighborhood,
			   u64 key, void *new_value, bool update, void **old_value_ptr)
{
	struct bucket *bucket = search_hop_list(map, neighborhood, key, NULL);

	if (bucket == NULL) {
		/* There is no bucket containing the key in the neighborhood. */
		return false;
	}

	/*
	 * Return the value of the current mapping (if desired) and update the mapping with the new
	 * value (if desired).
	 */
	if (old_value_ptr != NULL)
		*old_value_ptr = bucket->value;
	if (update)
		bucket->value = new_value;
	return true;
}

/**
 * find_or_make_vacancy() - Find an empty bucket.
 * @map: The int_map to search or modify.
 * @neighborhood: The first bucket in the neighborhood in which an empty bucket is needed for a new
 *                mapping.
 *
 * Find an empty bucket in a specified neighborhood for a new mapping or attempt to re-arrange
 * mappings so there is such a bucket. This operation may fail (returning NULL) if an empty bucket
 * is not available or could not be relocated to the neighborhood.
 *
 * Return: a pointer to an empty bucket in the desired neighborhood, or NULL if a vacancy could not
 *         be found or arranged.
 */
static struct bucket *find_or_make_vacancy(struct int_map *map,
					   struct bucket *neighborhood)
{
	/* Probe within and beyond the neighborhood for the first empty bucket. */
	struct bucket *hole = find_empty_bucket(map, neighborhood, MAX_PROBES);

	/*
	 * Keep trying until the empty bucket is in the bucket's neighborhood or we are unable to
	 * move it any closer by swapping it with a filled bucket.
	 */
	while (hole != NULL) {
		int distance = hole - neighborhood;

		if (distance < NEIGHBORHOOD) {
			/*
			 * We've found or relocated an empty bucket close enough to the initial
			 * hash bucket to be referenced by its hop vector.
			 */
			return hole;
		}

		/*
		 * The nearest empty bucket isn't within the neighborhood that must contain the new
		 * entry, so try to swap it with a bucket that is closer.
		 */
		hole = move_empty_bucket(map, hole);
	}

	return NULL;
}

/**
 * vdo_int_map_put() - Try to associate a value with an integer.
 * @map: The int_map to attempt to modify.
 * @key: The key with which to associate the new value.
 * @new_value: The value to be associated with the key.
 * @update: Whether to overwrite an existing value.
 * @old_value_ptr: A pointer in which to store either the old value (if the key was already mapped)
 *                 or NULL if the map did not contain the key; NULL may be provided if the caller
 *                 does not need to know the old value
 *
 * Try to associate a value (a pointer) with an integer in an int_map. If the map already contains
 * a mapping for the provided key, the old value is only replaced with the specified value if
 * update is true. In either case the old value is returned. If the map does not already contain a
 * value for the specified key, the new value is added regardless of the value of update.
 *
 * Return: VDO_SUCCESS or an error code.
 */
int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update,
		    void **old_value_ptr)
{
	struct bucket *neighborhood, *bucket;

	if (unlikely(new_value == NULL))
		return -EINVAL;

	/*
	 * Select the bucket at the start of the neighborhood that must contain any entry for the
	 * provided key.
	 */
	neighborhood = select_bucket(map, key);

	/*
	 * Check whether the neighborhood already contains an entry for the key, in which case we
	 * optionally update it, returning the old value.
	 */
	if (update_mapping(map, neighborhood, key, new_value, update, old_value_ptr))
		return VDO_SUCCESS;

	/*
	 * Find an empty bucket in the desired neighborhood for the new entry or re-arrange entries
	 * in the map so there is such a bucket. This operation will usually succeed; the loop body
	 * will only be executed on the rare occasions that we have to resize the map.
	 */
	while ((bucket = find_or_make_vacancy(map, neighborhood)) == NULL) {
		int result;

		/*
		 * There is no empty bucket in which to put the new entry in the current map, so
		 * we're forced to allocate a new bucket array with a larger capacity, re-hash all
		 * the entries into those buckets, and try again (a very expensive operation for
		 * large maps).
		 */
		result = resize_buckets(map);
		if (result != VDO_SUCCESS)
			return result;

		/*
		 * Resizing the map invalidates all pointers to buckets, so recalculate the
		 * neighborhood pointer.
		 */
		neighborhood = select_bucket(map, key);
	}

	/* Put the new entry in the empty bucket, adding it to the neighborhood. */
	bucket->key = key;
	bucket->value = new_value;
	insert_in_hop_list(neighborhood, bucket);
	map->size += 1;

	/* There was no existing entry, so there was no old value to be returned. */
	if (old_value_ptr != NULL)
		*old_value_ptr = NULL;
	return VDO_SUCCESS;
}

/**
 * vdo_int_map_remove() - Remove the mapping for a given key from the int_map.
 * @map: The int_map from which to remove the mapping.
 * @key: The key whose mapping is to be removed.
 *
 * Return: the value that was associated with the key, or NULL if it was not mapped.
 */
void *vdo_int_map_remove(struct int_map *map, u64 key)
{
	void *value;

	/* Select the bucket to search and search it for an existing entry. */
	struct bucket *bucket = select_bucket(map, key);
	struct bucket *previous;
	struct bucket *victim = search_hop_list(map, bucket, key, &previous);

	if (victim == NULL) {
		/* There is no matching entry to remove. */
		return NULL;
	}

	/*
	 * We found an entry to remove. Save the mapped value to return later and empty the bucket.
	 */
	map->size -= 1;
	value = victim->value;
	victim->value = NULL;
	victim->key = 0;

	/* The victim bucket is now empty, but it still needs to be spliced out of the hop list. */
	if (previous == NULL) {
		/* The victim is the head of the list, so swing first_hop. */
		bucket->first_hop = victim->next_hop;
	} else {
		previous->next_hop = victim->next_hop;
	}

	victim->next_hop = NULL_HOP_OFFSET;
	return value;
}
708