/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper by Josh Triplett, Paul E. McKenney
 * and Jonathan Walpole:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @buckets: Flexible array of @size bucket chain heads
 */
struct bucket_table {
	size_t			size;
	unsigned int		locks_mask;
	spinlock_t		*locks;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @hash_rnd: Seed to use while hashing
 * @max_shift: Maximum number of shifts while expanding
 * @min_shift: Minimum number of shifts while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	u32			hash_rnd;
	size_t			max_shift;
	size_t			min_shift;
	u32			nulls_base;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
};

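/*
 * Example (illustrative sketch only, not part of this API): a minimal
 * setup of &struct rhashtable_params for objects keyed by an integer.
 * The "struct test_obj" type, its fields and the choice of jhash() from
 * <linux/jhash.h> as hash function are assumptions made for the example.
 *
 *	struct test_obj {
 *		int			value;
 *		struct rhash_head	node;
 *	};
 *
 *	struct rhashtable ht;
 *	struct rhashtable_params params = {
 *		.nelem_hint	= 1024,
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.key_offset	= offsetof(struct test_obj, value),
 *		.key_len	= sizeof(int),
 *		.hashfn		= jhash,
 *		.nulls_base	= (1U << RHT_BASE_SHIFT),
 *	};
 *	int err = rhashtable_init(&ht, &params);
 */
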
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @future_tbl: Table under construction during expansion/shrinking
 * @nelems: Number of elements in table
 * @shift: Current size (1 << shift)
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @walkers: List of active walkers
 * @being_destroyed: True if table is set up for destruction
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	struct bucket_table __rcu	*future_tbl;
	atomic_t			nelems;
	atomic_t			shift;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	struct list_head		walkers;
	bool				being_destroyed;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @resize: Resize event occurred
 */
struct rhashtable_walker {
	struct list_head list;
	bool resize;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

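/*
 * Illustrative sketch of how the nulls marker encoding described at the
 * top of this file fits together (assuming @ht was initialised with a
 * non-zero &struct rhashtable_params.nulls_base):
 *
 *	struct rhash_head __rcu *head;
 *
 *	INIT_RHT_NULLS_HEAD(head, ht, hash);
 *
 * After this, rht_is_a_nulls((struct rhash_head *) head) is true and
 * rht_get_nulls_value((struct rhash_head *) head) yields
 * ht->p.nulls_base + hash, since NULLS_MARKER() stores the value shifted
 * left by one with the low bit set.
 */
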
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);

void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);

int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

void *rhashtable_lookup(struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg);

bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg);

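/*
 * Minimal insert/lookup sketch (illustrative only; "struct test_obj" and
 * its "node"/"value" members are the hypothetical setup shown above for
 * &struct rhashtable_params). The lookup returns the enclosing object,
 * which is only guaranteed to stay valid while the RCU read lock is held:
 *
 *	int key = 42;
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL), *found;
 *
 *	obj->value = key;
 *	rhashtable_insert(&ht, &obj->node);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup(&ht, &key);
 *	rcu_read_unlock();
 */
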
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

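/*
 * Walker usage sketch (illustrative only; reuses the hypothetical
 * "struct test_obj"). rhashtable_walk_start() takes the RCU read lock
 * and rhashtable_walk_stop() releases it; ERR_PTR(-EAGAIN) from
 * rhashtable_walk_next() signals that a resize occurred and the walk
 * has restarted:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto out;
 *
 *	while ((obj = rhashtable_walk_next(&iter))) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("value %d\n", obj->value);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 * out:
 *	rhashtable_walk_exit(&iter);
 */
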
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

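/*
 * Typical use of the dereference helpers (illustrative sketch, assuming
 * @ht points to an initialised table): the plain variants are for code
 * that holds ht->mutex or the relevant bucket lock, the _rcu variants
 * for readers inside an RCU read-side critical section:
 *
 *	struct bucket_table *tbl;
 *
 *	mutex_lock(&ht->mutex);
 *	tbl = rht_dereference(ht->tbl, ht);
 *	...
 *	mutex_unlock(&ht->mutex);
 */
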
#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member)	\
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)

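/*
 * Sketch of a typed bucket walk (illustrative only; assumes the caller
 * holds the bucket lock or ht->mutex for bucket index @hash of table
 * @tbl, and reuses the hypothetical "struct test_obj" from above):
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rht_for_each_entry(obj, pos, tbl, hash, node)
 *		pr_info("value %d\n", obj->value);
 */
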
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

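/*
 * Teardown sketch using the _safe variant (illustrative only; assumes
 * the table is no longer reachable by other CPUs, as during destruction,
 * and reuses the hypothetical "struct test_obj"):
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos, *next;
 *
 *	rht_for_each_entry_safe(obj, pos, next, tbl, hash, node)
 *		kfree(obj);
 */
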
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)

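/*
 * RCU bucket-walk sketch (illustrative only; assumes @ht points to an
 * initialised table, @hash is a valid bucket index for @tbl, and reuses
 * the hypothetical "struct test_obj"):
 *
 *	struct bucket_table *tbl;
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("value %d\n", obj->value);
 *	rcu_read_unlock();
 */
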
#endif /* _LINUX_RHASHTABLE_H */