xref: /linux/include/linux/rhashtable.h (revision a68a9bd086c2822d0c629443bd16ad1317afe501)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Resizable, Scalable, Concurrent Hash Table
4  *
5  * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
6  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
7  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
8  *
9  * Code partially derived from nft_hash
10  * Rewritten with rehash code from br_multicast plus single list
11  * pointer as suggested by Josh Triplett
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #ifndef _LINUX_RHASHTABLE_H
19 #define _LINUX_RHASHTABLE_H
20 
21 #include <linux/err.h>
22 #include <linux/errno.h>
23 #include <linux/jhash.h>
24 #include <linux/list_nulls.h>
25 #include <linux/workqueue.h>
26 #include <linux/rculist.h>
27 #include <linux/bit_spinlock.h>
28 
29 #include <linux/rhashtable-types.h>
30 /*
31  * Objects in an rhashtable have an embedded struct rhash_head
32  * which is linked into a hash chain in the hash table - or one
33  * of two or more hash tables when the rhashtable is being resized.
34  * The end of the chain is marked with a special nulls marker which has
35  * the least significant bit set but otherwise stores the address of
36  * the hash bucket.  This allows us to be sure we've found the end
37  * of the right list.
38  * The value stored in the hash bucket has BIT(0) used as a lock bit.
39  * This bit must be atomically set before any changes are made to
40  * the chain.  To avoid dereferencing this pointer without clearing
41  * the bit first, we use an opaque 'struct rhash_lock_head *' for the
42  * pointer stored in the bucket.  This struct needs to be defined so
43  * that rcu_dereference() works on it, but it has no content so a
44  * cast is needed for it to be useful.  This ensures it isn't
45  * used by mistake without first clearing the lock bit.
46  */
47 struct rhash_lock_head {};
48 
49 /* Maximum chain length before rehash
50  *
51  * The maximum (not average) chain length grows with the size of the hash
52  * table, at a rate of (log N)/(log log N).
53  *
54  * The value of 16 is selected so that even if the hash table grew to
55  * 2^32 you would not expect the maximum chain length to exceed it
56  * unless we are under attack (or extremely unlucky).
57  *
58  * As this limit is only to detect attacks, we don't need to set it to a
59  * lower value as you'd need the chain length to vastly exceed 16 to have
60  * any real effect on the system.
61  */
62 #define RHT_ELASTICITY	16u
63 
64 /**
65  * struct bucket_table - Table of hash buckets
66  * @size: Number of hash buckets
67  * @nest: Number of bits of first-level nested table.
69  * @hash_rnd: Random seed to fold into hash
70  * @walkers: List of active walkers
71  * @rcu: RCU structure for freeing the table
72  * @future_tbl: Table under construction during rehashing
73  * @dep_map: Lockdep map for the per-bucket bit spin locks
74  * @buckets: size * hash buckets
75  */
76 struct bucket_table {
77 	unsigned int		size;
78 	unsigned int		nest;
79 	u32			hash_rnd;
80 	struct list_head	walkers;
81 	struct rcu_head		rcu;
82 
83 	struct bucket_table __rcu *future_tbl;
84 
85 	struct lockdep_map	dep_map;
86 
87 	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
88 };
89 
90 /*
91  * NULLS_MARKER() expects a hash value with the low
92  * bits most likely to be significant, and it discards
93  * the msb.
94  * We give it an address, in which the bottom bit is
95  * always 0, and the msb might be significant.
96  * So we shift the address down one bit to align with
97  * expectations and avoid losing a significant bit.
98  *
99  * We never store the NULLS_MARKER in the hash table
100  * itself as we need the lsb for locking.
101  * Instead we store NULL.
102  */
103 #define	RHT_NULLS_MARKER(ptr)	\
104 	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
105 #define INIT_RHT_NULLS_HEAD(ptr)	\
106 	((ptr) = NULL)
107 
108 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
109 {
110 	return ((unsigned long) ptr & 1);
111 }
112 
113 static inline void *rht_obj(const struct rhashtable *ht,
114 			    const struct rhash_head *he)
115 {
116 	return (char *)he - ht->p.head_offset;
117 }
118 
119 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
120 					    unsigned int hash)
121 {
122 	return hash & (tbl->size - 1);
123 }
124 
125 static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
126 	const void *key, const struct rhashtable_params params,
127 	unsigned int hash_rnd)
128 {
129 	unsigned int hash;
130 
131 	/* params must be equal to ht->p if it isn't constant. */
132 	if (!__builtin_constant_p(params.key_len))
133 		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
134 	else if (params.key_len) {
135 		unsigned int key_len = params.key_len;
136 
137 		if (params.hashfn)
138 			hash = params.hashfn(key, key_len, hash_rnd);
139 		else if (key_len & (sizeof(u32) - 1))
140 			hash = jhash(key, key_len, hash_rnd);
141 		else
142 			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
143 	} else {
144 		unsigned int key_len = ht->p.key_len;
145 
146 		if (params.hashfn)
147 			hash = params.hashfn(key, key_len, hash_rnd);
148 		else
149 			hash = jhash(key, key_len, hash_rnd);
150 	}
151 
152 	return hash;
153 }
154 
155 static __always_inline unsigned int rht_key_hashfn(
156 	struct rhashtable *ht, const struct bucket_table *tbl,
157 	const void *key, const struct rhashtable_params params)
158 {
159 	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
160 
161 	return rht_bucket_index(tbl, hash);
162 }
163 
164 static __always_inline unsigned int rht_head_hashfn(
165 	struct rhashtable *ht, const struct bucket_table *tbl,
166 	const struct rhash_head *he, const struct rhashtable_params params)
167 {
168 	const char *ptr = rht_obj(ht, he);
169 
170 	return likely(params.obj_hashfn) ?
171 	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
172 							    ht->p.key_len,
173 						       tbl->hash_rnd)) :
174 	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
175 }
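
/*
 * Illustrative sketch (not part of the upstream documentation): the object
 * type and parameter block below are hypothetical, but show how the helpers
 * above pick a hash function.  With a constant, 4-byte-aligned key_len and
 * no hashfn/obj_hashfn, rht_key_get_hash() resolves to the jhash2() branch
 * at compile time, and rht_head_hashfn() locates the key inside the object
 * via head_offset and key_offset.
 *
 *	struct example_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params example_params = {
 *		.head_offset		= offsetof(struct example_obj, node),
 *		.key_offset		= offsetof(struct example_obj, key),
 *		.key_len		= sizeof(u32),
 *		.automatic_shrinking	= true,
 *	};
 *
 * Later sketches in this file reuse these hypothetical names.
 */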
176 
177 /**
178  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
179  * @ht:		hash table
180  * @tbl:	current table
181  */
182 static inline bool rht_grow_above_75(const struct rhashtable *ht,
183 				     const struct bucket_table *tbl)
184 {
185 	/* Expand table when exceeding 75% load */
186 	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
187 	       (!ht->p.max_size || tbl->size < ht->p.max_size);
188 }
189 
190 /**
191  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
192  * @ht:		hash table
193  * @tbl:	current table
194  */
195 static inline bool rht_shrink_below_30(const struct rhashtable *ht,
196 				       const struct bucket_table *tbl)
197 {
198 	/* Shrink table beneath 30% load */
199 	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
200 	       tbl->size > ht->p.min_size;
201 }
202 
203 /**
204  * rht_grow_above_100 - returns true if nelems > table-size
205  * @ht:		hash table
206  * @tbl:	current table
207  */
208 static inline bool rht_grow_above_100(const struct rhashtable *ht,
209 				      const struct bucket_table *tbl)
210 {
211 	return atomic_read(&ht->nelems) > tbl->size &&
212 		(!ht->p.max_size || tbl->size < ht->p.max_size);
213 }
214 
215 /**
216  * rht_grow_above_max - returns true if table is above maximum
217  * @ht:		hash table
218  * @tbl:	current table
219  */
220 static inline bool rht_grow_above_max(const struct rhashtable *ht,
221 				      const struct bucket_table *tbl)
222 {
223 	return atomic_read(&ht->nelems) >= ht->max_elems;
224 }
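
/*
 * Worked example for the thresholds above (a sketch, not part of the
 * original documentation): with a hypothetical 1024-bucket table,
 * rht_grow_above_75() fires once nelems exceeds 1024 / 4 * 3 = 768,
 * rht_shrink_below_30() once nelems drops below 1024 * 3 / 10 = 307, and
 * rht_grow_above_100() once nelems exceeds 1024.  rht_grow_above_max()
 * compares against ht->max_elems, which rhashtable_init() derives from the
 * max_size parameter (or a large default when max_size is 0).
 */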
225 
226 #ifdef CONFIG_PROVE_LOCKING
227 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
228 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
229 #else
230 static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
231 {
232 	return 1;
233 }
234 
235 static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
236 					     u32 hash)
237 {
238 	return 1;
239 }
240 #endif /* CONFIG_PROVE_LOCKING */
241 
242 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
243 			     struct rhash_head *obj);
244 
245 void rhashtable_walk_enter(struct rhashtable *ht,
246 			   struct rhashtable_iter *iter);
247 void rhashtable_walk_exit(struct rhashtable_iter *iter);
248 int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
249 
250 static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
251 	__acquires_shared(RCU)
252 {
253 	(void)rhashtable_walk_start_check(iter);
254 }
255 
256 void *rhashtable_walk_next(struct rhashtable_iter *iter);
257 void *rhashtable_walk_peek(struct rhashtable_iter *iter);
258 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
259 
260 void rhashtable_free_and_destroy(struct rhashtable *ht,
261 				 void (*free_fn)(void *ptr, void *arg),
262 				 void *arg);
263 void rhashtable_destroy(struct rhashtable *ht);
264 
265 struct rhash_lock_head __rcu **rht_bucket_nested(
266 	const struct bucket_table *tbl, unsigned int hash);
267 struct rhash_lock_head __rcu **__rht_bucket_nested(
268 	const struct bucket_table *tbl, unsigned int hash);
269 struct rhash_lock_head __rcu **rht_bucket_nested_insert(
270 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
271 
272 #define rht_dereference(p, ht) \
273 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
274 
275 #define rht_dereference_rcu(p, ht) \
276 	rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))
277 
278 #define rht_dereference_bucket(p, tbl, hash) \
279 	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
280 
281 #define rht_dereference_bucket_rcu(p, tbl, hash) \
282 	rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))
283 
284 #define rht_entry(tpos, pos, member) \
285 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
286 
287 static inline struct rhash_lock_head __rcu *const *rht_bucket(
288 	const struct bucket_table *tbl, unsigned int hash)
289 {
290 	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
291 				     &tbl->buckets[hash];
292 }
293 
294 static inline struct rhash_lock_head __rcu **rht_bucket_var(
295 	struct bucket_table *tbl, unsigned int hash)
296 {
297 	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
298 				     &tbl->buckets[hash];
299 }
300 
301 static inline struct rhash_lock_head __rcu **rht_bucket_insert(
302 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
303 {
304 	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
305 				     &tbl->buckets[hash];
306 }
307 
308 /*
309  * We lock a bucket by setting BIT(0) in the pointer - this is always
310  * zero in real pointers.  The NULLS mark is never stored in the bucket,
311  * rather we store NULL if the bucket is empty.
312  * bit_spin_locks do not handle contention well, but the whole point
313  * of the hashtable design is to achieve minimum per-bucket contention.
314  * A nested hash table might not have a bucket pointer.  In that case
315  * we cannot get a lock.  For remove and replace, such a bucket cannot
316  * contain the object of interest, so no locking is needed.
317  * For insert we allocate the bucket if this is the last bucket_table,
318  * and then take the lock.
319  * Sometimes we unlock a bucket by writing a new pointer there.  In that
320  * case we don't need to unlock, but we do need to reset state such as
321  * preemption and interrupts. For that we have rht_assign_unlock().  As rcu_assign_pointer()
322  * provides the same release semantics that bit_spin_unlock() provides,
323  * this is safe.
324  * When we write to a bucket without unlocking, we use rht_assign_locked().
325  */
326 
327 static inline unsigned long rht_lock(struct bucket_table *tbl,
328 				     struct rhash_lock_head __rcu **bkt)
329 	__acquires(__bitlock(0, bkt))
330 {
331 	unsigned long flags;
332 
333 	local_irq_save(flags);
334 	bit_spin_lock(0, (unsigned long *)bkt);
335 	lock_map_acquire(&tbl->dep_map);
336 	return flags;
337 }
338 
339 static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
340 					struct rhash_lock_head __rcu **bucket,
341 					unsigned int subclass)
342 	__acquires(__bitlock(0, bucket))
343 {
344 	unsigned long flags;
345 
346 	local_irq_save(flags);
347 	bit_spin_lock(0, (unsigned long *)bucket);
348 	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
349 	return flags;
350 }
351 
352 static inline void rht_unlock(struct bucket_table *tbl,
353 			      struct rhash_lock_head __rcu **bkt,
354 			      unsigned long flags)
355 	__releases(__bitlock(0, bkt))
356 {
357 	lock_map_release(&tbl->dep_map);
358 	bit_spin_unlock(0, (unsigned long *)bkt);
359 	local_irq_restore(flags);
360 }
361 
362 enum rht_lookup_freq {
363 	RHT_LOOKUP_NORMAL,
364 	RHT_LOOKUP_LIKELY,
365 };
366 
367 static __always_inline struct rhash_head *__rht_ptr(
368 	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
369 	const enum rht_lookup_freq freq)
370 {
371 	unsigned long p_val = (unsigned long)p & ~BIT(0);
372 
373 	BUILD_BUG_ON(!__builtin_constant_p(freq));
374 
375 	if (freq == RHT_LOOKUP_LIKELY)
376 		return (struct rhash_head *)
377 			(likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
378 	else
379 		return (struct rhash_head *)
380 			(p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
381 }
382 
383 /*
384  * Where 'bkt' is a bucket and might be locked:
385  *   rht_ptr_rcu() dereferences that pointer and clears the lock bit.
386  *   rht_ptr() dereferences in a context where the bucket is locked.
387  *   rht_ptr_exclusive() dereferences in a context where exclusive
388  *            access is guaranteed, such as when destroying the table.
389  */
390 static __always_inline struct rhash_head *__rht_ptr_rcu(
391 	struct rhash_lock_head __rcu *const *bkt,
392 	const enum rht_lookup_freq freq)
393 {
394 	return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
395 }
396 
397 static inline struct rhash_head *rht_ptr_rcu(
398 	struct rhash_lock_head __rcu *const *bkt)
399 {
400 	return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
401 }
402 
403 static inline struct rhash_head *rht_ptr(
404 	struct rhash_lock_head __rcu *const *bkt,
405 	struct bucket_table *tbl,
406 	unsigned int hash)
407 {
408 	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
409 			 RHT_LOOKUP_NORMAL);
410 }
411 
412 static inline struct rhash_head *rht_ptr_exclusive(
413 	struct rhash_lock_head __rcu *const *bkt)
414 {
415 	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
416 			 RHT_LOOKUP_NORMAL);
417 }
418 
419 static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
420 				     struct rhash_head *obj)
421 {
422 	if (rht_is_a_nulls(obj))
423 		obj = NULL;
424 	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
425 }
426 
427 static inline void rht_assign_unlock(struct bucket_table *tbl,
428 				     struct rhash_lock_head __rcu **bkt,
429 				     struct rhash_head *obj,
430 				     unsigned long flags)
431 	__releases(__bitlock(0, bkt))
432 {
433 	if (rht_is_a_nulls(obj))
434 		obj = NULL;
435 	lock_map_release(&tbl->dep_map);
436 	rcu_assign_pointer(*bkt, (void *)obj);
437 	preempt_enable();
438 	__release(__bitlock(0, bkt));
439 	local_irq_restore(flags);
440 }
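
/*
 * Minimal sketch of the bucket-update pattern these helpers support
 * (hypothetical code, mirroring what the fast paths below do when inserting
 * at the head of a chain):
 *
 *	unsigned long flags = rht_lock(tbl, bkt);
 *	struct rhash_head *head = rht_ptr(bkt, tbl, hash);
 *
 *	RCU_INIT_POINTER(obj->next, head);
 *	atomic_inc(&ht->nelems);
 *	rht_assign_unlock(tbl, bkt, obj, flags);
 *
 * rht_assign_unlock() publishes the new head and drops the bucket lock in
 * one store; when only an interior pointer changes, rcu_assign_pointer() on
 * that pointer followed by rht_unlock() is used instead.
 */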
441 
442 /**
443  * rht_for_each_from - iterate over hash chain from given head
444  * @pos:	the &struct rhash_head to use as a loop cursor.
445  * @head:	the &struct rhash_head to start from
446  * @tbl:	the &struct bucket_table
447  * @hash:	the hash value / bucket index
448  */
449 #define rht_for_each_from(pos, head, tbl, hash) \
450 	for (pos = head;			\
451 	     !rht_is_a_nulls(pos);		\
452 	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
453 
454 /**
455  * rht_for_each - iterate over hash chain
456  * @pos:	the &struct rhash_head to use as a loop cursor.
457  * @tbl:	the &struct bucket_table
458  * @hash:	the hash value / bucket index
459  */
460 #define rht_for_each(pos, tbl, hash) \
461 	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash),  \
462 			  tbl, hash)
463 
464 /**
465  * rht_for_each_entry_from - iterate over hash chain from given head
466  * @tpos:	the type * to use as a loop cursor.
467  * @pos:	the &struct rhash_head to use as a loop cursor.
468  * @head:	the &struct rhash_head to start from
469  * @tbl:	the &struct bucket_table
470  * @hash:	the hash value / bucket index
471  * @member:	name of the &struct rhash_head within the hashable struct.
472  */
473 #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)	\
474 	for (pos = head;						\
475 	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
476 	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
477 
478 /**
479  * rht_for_each_entry - iterate over hash chain of given type
480  * @tpos:	the type * to use as a loop cursor.
481  * @pos:	the &struct rhash_head to use as a loop cursor.
482  * @tbl:	the &struct bucket_table
483  * @hash:	the hash value / bucket index
484  * @member:	name of the &struct rhash_head within the hashable struct.
485  */
486 #define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
487 	rht_for_each_entry_from(tpos, pos,				\
488 				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
489 				tbl, hash, member)
490 
491 /**
492  * rht_for_each_entry_safe - safely iterate over hash chain of given type
493  * @tpos:	the type * to use as a loop cursor.
494  * @pos:	the &struct rhash_head to use as a loop cursor.
495  * @next:	the &struct rhash_head to use as next in loop cursor.
496  * @tbl:	the &struct bucket_table
497  * @hash:	the hash value / bucket index
498  * @member:	name of the &struct rhash_head within the hashable struct.
499  *
500  * This hash chain list-traversal primitive allows for the looped code to
501  * remove the loop cursor from the list.
502  */
503 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
504 	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),		      \
505 	     next = !rht_is_a_nulls(pos) ?				      \
506 		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
507 	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
508 	     pos = next,						      \
509 	     next = !rht_is_a_nulls(pos) ?				      \
510 		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
511 
512 /**
513  * rht_for_each_rcu_from - iterate over rcu hash chain from given head
514  * @pos:	the &struct rhash_head to use as a loop cursor.
515  * @head:	the &struct rhash_head to start from
516  * @tbl:	the &struct bucket_table
517  * @hash:	the hash value / bucket index
518  *
519  * This hash chain list-traversal primitive may safely run concurrently with
520  * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
521  * traversal is guarded by rcu_read_lock().
522  */
523 #define rht_for_each_rcu_from(pos, head, tbl, hash)			\
524 	for (({barrier(); }),						\
525 	     pos = head;						\
526 	     !rht_is_a_nulls(pos);					\
527 	     pos = rcu_dereference_all(pos->next))
528 
529 /**
530  * rht_for_each_rcu - iterate over rcu hash chain
531  * @pos:	the &struct rhash_head to use as a loop cursor.
532  * @tbl:	the &struct bucket_table
533  * @hash:	the hash value / bucket index
534  *
535  * This hash chain list-traversal primitive may safely run concurrently with
536  * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
537  * traversal is guarded by rcu_read_lock().
538  */
539 #define rht_for_each_rcu(pos, tbl, hash)			\
540 	for (({barrier(); }),					\
541 	     pos = rht_ptr_rcu(rht_bucket(tbl, hash));		\
542 	     !rht_is_a_nulls(pos);				\
543 	     pos = rcu_dereference_all(pos->next))
544 
545 /**
546  * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
547  * @tpos:	the type * to use as a loop cursor.
548  * @pos:	the &struct rhash_head to use as a loop cursor.
549  * @head:	the &struct rhash_head to start from
550  * @tbl:	the &struct bucket_table
551  * @hash:	the hash value / bucket index
552  * @member:	name of the &struct rhash_head within the hashable struct.
553  *
554  * This hash chain list-traversal primitive may safely run concurrently with
555  * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
556  * traversal is guarded by rcu_read_lock().
557  */
558 #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
559 	for (({barrier(); }),						    \
560 	     pos = head;						    \
561 	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
562 	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
563 
564 /**
565  * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
566  * @tpos:	the type * to use as a loop cursor.
567  * @pos:	the &struct rhash_head to use as a loop cursor.
568  * @tbl:	the &struct bucket_table
569  * @hash:	the hash value / bucket index
570  * @member:	name of the &struct rhash_head within the hashable struct.
571  *
572  * This hash chain list-traversal primitive may safely run concurrently with
573  * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
574  * traversal is guarded by rcu_read_lock().
575  */
576 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
577 	rht_for_each_entry_rcu_from(tpos, pos,				   \
578 				    rht_ptr_rcu(rht_bucket(tbl, hash)),	   \
579 				    tbl, hash, member)
580 
581 /**
582  * rhl_for_each_rcu - iterate over rcu hash table list
583  * @pos:	the &struct rlist_head to use as a loop cursor.
584  * @list:	the head of the list
585  *
586  * This hash chain list-traversal primitive should be used on the
587  * list returned by rhltable_lookup.
588  */
589 #define rhl_for_each_rcu(pos, list)					\
590 	for (pos = list; pos; pos = rcu_dereference_all(pos->next))
591 
592 /**
593  * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
594  * @tpos:	the type * to use as a loop cursor.
595  * @pos:	the &struct rlist_head to use as a loop cursor.
596  * @list:	the head of the list
597  * @member:	name of the &struct rlist_head within the hashable struct.
598  *
599  * This hash chain list-traversal primitive should be used on the
600  * list returned by rhltable_lookup.
601  */
602 #define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
603 	for (pos = list; pos && rht_entry(tpos, pos, member);		\
604 	     pos = rcu_dereference_all(pos->next))
605 
606 static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
607 				     const void *obj)
608 {
609 	struct rhashtable *ht = arg->ht;
610 	const char *ptr = obj;
611 
612 	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
613 }
614 
615 /* Internal function, do not use. */
616 static __always_inline struct rhash_head *__rhashtable_lookup(
617 	struct rhashtable *ht, const void *key,
618 	const struct rhashtable_params params,
619 	const enum rht_lookup_freq freq)
620 	__must_hold_shared(RCU)
621 {
622 	struct rhashtable_compare_arg arg = {
623 		.ht = ht,
624 		.key = key,
625 	};
626 	struct rhash_lock_head __rcu *const *bkt;
627 	struct bucket_table *tbl;
628 	struct rhash_head *he;
629 	unsigned int hash;
630 
631 	BUILD_BUG_ON(!__builtin_constant_p(freq));
632 	tbl = rht_dereference_rcu(ht->tbl, ht);
633 restart:
634 	hash = rht_key_hashfn(ht, tbl, key, params);
635 	bkt = rht_bucket(tbl, hash);
636 	do {
637 		rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
638 			if (params.obj_cmpfn ?
639 			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
640 			    rhashtable_compare(&arg, rht_obj(ht, he)))
641 				continue;
642 			return he;
643 		}
644 		/* An object might have been moved to a different hash chain,
645 		 * while we walk along it - better check and retry.
646 		 */
647 	} while (he != RHT_NULLS_MARKER(bkt));
648 
649 	/* Ensure we see any new tables. */
650 	smp_rmb();
651 
652 	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
653 	if (unlikely(tbl))
654 		goto restart;
655 
656 	return NULL;
657 }
658 
659 /**
660  * rhashtable_lookup - search hash table
661  * @ht:		hash table
662  * @key:	the pointer to the key
663  * @params:	hash table parameters
664  *
665  * Computes the hash value for the key and traverses the bucket chain looking
666  * for an entry with an identical key. The first matching entry is returned.
667  *
668  * This must only be called under the RCU read lock.
669  *
670  * Returns the first entry on which the compare function returned true.
671  */
672 static __always_inline void *rhashtable_lookup(
673 	struct rhashtable *ht, const void *key,
674 	const struct rhashtable_params params)
675 	__must_hold_shared(RCU)
676 {
677 	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
678 						    RHT_LOOKUP_NORMAL);
679 
680 	return he ? rht_obj(ht, he) : NULL;
681 }
682 
683 static __always_inline void *rhashtable_lookup_likely(
684 	struct rhashtable *ht, const void *key,
685 	const struct rhashtable_params params)
686 	__must_hold_shared(RCU)
687 {
688 	struct rhash_head *he = __rhashtable_lookup(ht, key, params,
689 						    RHT_LOOKUP_LIKELY);
690 
691 	return likely(he) ? rht_obj(ht, he) : NULL;
692 }
693 
694 /**
695  * rhashtable_lookup_fast - search hash table, without RCU read lock
696  * @ht:		hash table
697  * @key:	the pointer to the key
698  * @params:	hash table parameters
699  *
700  * Computes the hash value for the key and traverses the bucket chain looking
701  * for an entry with an identical key. The first matching entry is returned.
702  *
703  * Only use this function when you have other mechanisms guaranteeing
704  * that the object won't go away after the RCU read lock is released.
705  *
706  * Returns the first entry on which the compare function returned true.
707  */
708 static __always_inline void *rhashtable_lookup_fast(
709 	struct rhashtable *ht, const void *key,
710 	const struct rhashtable_params params)
711 {
712 	void *obj;
713 
714 	rcu_read_lock();
715 	obj = rhashtable_lookup(ht, key, params);
716 	rcu_read_unlock();
717 
718 	return obj;
719 }
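
/*
 * Usage sketch (hypothetical 'my_ht' table and do_something() helper,
 * reusing the example_obj/example_params names from the hashing sketch
 * above): rhashtable_lookup() must run inside an RCU read-side critical
 * section, whereas rhashtable_lookup_fast() takes and releases one itself.
 *
 *	u32 key = 42;
 *	struct example_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key, example_params);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 *
 * The returned object is only guaranteed to stay around until
 * rcu_read_unlock() unless some other reference is held.
 */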
720 
721 /**
722  * rhltable_lookup - search hash list table
723  * @hlt:	hash table
724  * @key:	the pointer to the key
725  * @params:	hash table parameters
726  *
727  * Computes the hash value for the key and traverses the bucket chain looking
728  * for an entry with an identical key.  All matching entries are returned
729  * in a list.
730  *
731  * This must only be called under the RCU read lock.
732  *
733  * Returns the list of entries that match the given key.
734  */
735 static __always_inline struct rhlist_head *rhltable_lookup(
736 	struct rhltable *hlt, const void *key,
737 	const struct rhashtable_params params)
738 	__must_hold_shared(RCU)
739 {
740 	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
741 						    RHT_LOOKUP_NORMAL);
742 
743 	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
744 }
745 
746 static __always_inline struct rhlist_head *rhltable_lookup_likely(
747 	struct rhltable *hlt, const void *key,
748 	const struct rhashtable_params params)
749 	__must_hold_shared(RCU)
750 {
751 	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
752 						    RHT_LOOKUP_LIKELY);
753 
754 	return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
755 }
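
/*
 * Usage sketch for duplicate-key tables (the 'struct flow', 'flow_params'
 * and 'my_hlt' names are hypothetical): rhltable_lookup() returns the whole
 * chain of entries with a matching key, which is then walked with
 * rhl_for_each_entry_rcu() under the same RCU read lock.
 *
 *	struct flow {
 *		u32			key;
 *		struct rhlist_head	list_node;
 *	};
 *
 *	struct rhlist_head *list, *pos;
 *	struct flow *flow;
 *	u32 key = 42;
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&my_hlt, &key, flow_params);
 *	rhl_for_each_entry_rcu(flow, pos, list, list_node)
 *		process_flow(flow);
 *	rcu_read_unlock();
 *
 * flow_params would set head_offset to offsetof(struct flow, list_node)
 * and key_offset/key_len as for a plain rhashtable.
 */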
756 
757 /* Internal function, please use rhashtable_insert_fast() instead. This
758  * function returns NULL on success, the existing element if there is a key
759  * clash, and an error via ERR_PTR() otherwise.
760  */
761 static __always_inline void *__rhashtable_insert_fast(
762 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
763 	const struct rhashtable_params params, bool rhlist)
764 {
765 	struct rhashtable_compare_arg arg = {
766 		.ht = ht,
767 		.key = key,
768 	};
769 	struct rhash_lock_head __rcu **bkt;
770 	struct rhash_head __rcu **pprev;
771 	struct bucket_table *tbl;
772 	struct rhash_head *head;
773 	unsigned long flags;
774 	unsigned int hash;
775 	int elasticity;
776 	void *data;
777 
778 	rcu_read_lock();
779 
780 	tbl = rht_dereference_rcu(ht->tbl, ht);
781 	hash = rht_head_hashfn(ht, tbl, obj, params);
782 	elasticity = RHT_ELASTICITY;
783 	bkt = rht_bucket_insert(ht, tbl, hash);
784 	data = ERR_PTR(-ENOMEM);
785 	if (!bkt)
786 		goto out;
787 	pprev = NULL;
788 	flags = rht_lock(tbl, bkt);
789 
790 	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
791 slow_path:
792 		rht_unlock(tbl, bkt, flags);
793 		rcu_read_unlock();
794 		return rhashtable_insert_slow(ht, key, obj);
795 	}
796 
797 	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
798 		struct rhlist_head *plist;
799 		struct rhlist_head *list;
800 
801 		elasticity--;
802 		if (!key ||
803 		    (params.obj_cmpfn ?
804 		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
805 		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
806 			pprev = &head->next;
807 			continue;
808 		}
809 
810 		data = rht_obj(ht, head);
811 
812 		if (!rhlist)
813 			goto out_unlock;
814 
815 
816 		list = container_of(obj, struct rhlist_head, rhead);
817 		plist = container_of(head, struct rhlist_head, rhead);
818 
819 		RCU_INIT_POINTER(list->next, plist);
820 		head = rht_dereference_bucket(head->next, tbl, hash);
821 		RCU_INIT_POINTER(list->rhead.next, head);
822 		if (pprev) {
823 			rcu_assign_pointer(*pprev, obj);
824 			rht_unlock(tbl, bkt, flags);
825 		} else
826 			rht_assign_unlock(tbl, bkt, obj, flags);
827 		data = NULL;
828 		goto out;
829 	}
830 
831 	if (elasticity <= 0)
832 		goto slow_path;
833 
834 	data = ERR_PTR(-E2BIG);
835 	if (unlikely(rht_grow_above_max(ht, tbl)))
836 		goto out_unlock;
837 
838 	if (unlikely(rht_grow_above_100(ht, tbl)))
839 		goto slow_path;
840 
841 	/* Inserting at head of list makes unlocking free. */
842 	head = rht_ptr(bkt, tbl, hash);
843 
844 	RCU_INIT_POINTER(obj->next, head);
845 	if (rhlist) {
846 		struct rhlist_head *list;
847 
848 		list = container_of(obj, struct rhlist_head, rhead);
849 		RCU_INIT_POINTER(list->next, NULL);
850 	}
851 
852 	atomic_inc(&ht->nelems);
853 	rht_assign_unlock(tbl, bkt, obj, flags);
854 
855 	if (rht_grow_above_75(ht, tbl))
856 		schedule_work(&ht->run_work);
857 
858 	data = NULL;
859 out:
860 	rcu_read_unlock();
861 
862 	return data;
863 
864 out_unlock:
865 	rht_unlock(tbl, bkt, flags);
866 	goto out;
867 }
868 
869 /**
870  * rhashtable_insert_fast - insert object into hash table
871  * @ht:		hash table
872  * @obj:	pointer to hash head inside object
873  * @params:	hash table parameters
874  *
875  * Will take the per bucket bitlock to protect against concurrent mutations
876  * on the same bucket. Multiple insertions may occur in parallel unless
877  * they map to the same bucket.
878  *
879  * It is safe to call this function from atomic context.
880  *
881  * Will trigger an automatic deferred table resizing if residency in the
882  * table grows beyond 75%.
883  */
884 static __always_inline int rhashtable_insert_fast(
885 	struct rhashtable *ht, struct rhash_head *obj,
886 	const struct rhashtable_params params)
887 {
888 	void *ret;
889 
890 	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
891 	if (IS_ERR(ret))
892 		return PTR_ERR(ret);
893 
894 	return ret == NULL ? 0 : -EEXIST;
895 }
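
/*
 * Usage sketch (hypothetical, reusing the example_obj/example_params names
 * from the hashing sketch above): create the table once with
 * rhashtable_init(), declared in <linux/rhashtable-types.h>, and then insert
 * objects by their embedded rhash_head.
 *
 *	static struct rhashtable my_ht;
 *
 *	struct example_obj *obj;
 *	int err;
 *
 *	err = rhashtable_init(&my_ht, &example_params);
 *	if (err)
 *		return err;
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, example_params);
 *
 * A negative return value (such as -ENOMEM or -E2BIG) indicates failure;
 * detecting duplicate keys at insert time is what the
 * rhashtable_lookup_insert_*() variants further down provide.
 */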
896 
897 /**
898  * rhltable_insert_key - insert object into hash list table
899  * @hlt:	hash list table
900  * @key:	the pointer to the key
901  * @list:	pointer to hash list head inside object
902  * @params:	hash table parameters
903  *
904  * Will take the per bucket bitlock to protect against concurrent mutations
905  * on the same bucket. Multiple insertions may occur in parallel unless
906  * they map to the same bucket.
907  *
908  * It is safe to call this function from atomic context.
909  *
910  * Will trigger an automatic deferred table resizing if residency in the
911  * table grows beyond 75%.
912  */
913 static __always_inline int rhltable_insert_key(
914 	struct rhltable *hlt, const void *key, struct rhlist_head *list,
915 	const struct rhashtable_params params)
916 {
917 	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
918 						params, true));
919 }
920 
921 /**
922  * rhltable_insert - insert object into hash list table
923  * @hlt:	hash list table
924  * @list:	pointer to hash list head inside object
925  * @params:	hash table parameters
926  *
927  * Will take the per bucket bitlock to protect against concurrent mutations
928  * on the same bucket. Multiple insertions may occur in parallel unless
929  * they map to the same bucket.
930  *
931  * It is safe to call this function from atomic context.
932  *
933  * Will trigger an automatic deferred table resizing if residency in the
934  * table grows beyond 75%.
935  */
936 static __always_inline int rhltable_insert(
937 	struct rhltable *hlt, struct rhlist_head *list,
938 	const struct rhashtable_params params)
939 {
940 	const char *key = rht_obj(&hlt->ht, &list->rhead);
941 
942 	key += params.key_offset;
943 
944 	return rhltable_insert_key(hlt, key, list, params);
945 }
946 
947 /**
948  * rhashtable_lookup_insert_fast - lookup and insert object into hash table
949  * @ht:		hash table
950  * @obj:	pointer to hash head inside object
951  * @params:	hash table parameters
952  *
953  * This lookup function may only be used for fixed-key hash tables (key_len
954  * parameter set). It will BUG() if used inappropriately.
955  *
956  * It is safe to call this function from atomic context.
957  *
958  * Will trigger an automatic deferred table resizing if residency in the
959  * table grows beyond 75%.
960  */
961 static __always_inline int rhashtable_lookup_insert_fast(
962 	struct rhashtable *ht, struct rhash_head *obj,
963 	const struct rhashtable_params params)
964 {
965 	const char *key = rht_obj(ht, obj);
966 	void *ret;
967 
968 	BUG_ON(ht->p.obj_hashfn);
969 
970 	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
971 				       false);
972 	if (IS_ERR(ret))
973 		return PTR_ERR(ret);
974 
975 	return ret == NULL ? 0 : -EEXIST;
976 }
977 
978 /**
979  * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
980  * @ht:		hash table
981  * @obj:	pointer to hash head inside object
982  * @params:	hash table parameters
983  *
984  * Just like rhashtable_lookup_insert_fast(), but this function returns the
985  * object if it exists, NULL if it did not exist and the insertion was
986  * successful, and an ERR_PTR otherwise.
987  */
988 static __always_inline void *rhashtable_lookup_get_insert_fast(
989 	struct rhashtable *ht, struct rhash_head *obj,
990 	const struct rhashtable_params params)
991 {
992 	const char *key = rht_obj(ht, obj);
993 
994 	BUG_ON(ht->p.obj_hashfn);
995 
996 	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
997 					false);
998 }
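
/*
 * Typical get-or-create pattern (a sketch with the hypothetical example_obj
 * type and my_ht table, inside a helper that receives a 'key' argument):
 * allocate a candidate object, try to insert it, and fall back to the entry
 * that won the race on a clash.
 *
 *	struct example_obj *new, *old;
 *
 *	new = kzalloc(sizeof(*new), GFP_ATOMIC);
 *	if (!new)
 *		return ERR_PTR(-ENOMEM);
 *	new->key = key;
 *
 *	old = rhashtable_lookup_get_insert_fast(&my_ht, &new->node,
 *						example_params);
 *	if (IS_ERR(old)) {
 *		kfree(new);
 *		return old;
 *	}
 *	if (old) {
 *		kfree(new);
 *		return old;
 *	}
 *	return new;
 */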
999 
1000 /**
1001  * rhashtable_lookup_insert_key - search and insert object to hash table
1002  *				  with explicit key
1003  * @ht:		hash table
1004  * @key:	key
1005  * @obj:	pointer to hash head inside object
1006  * @params:	hash table parameters
1007  *
1008  * Lookups may occur in parallel with hashtable mutations and resizing.
1009  *
1010  * Will trigger an automatic deferred table resizing if residency in the
1011  * table grows beyond 75%.
1012  *
1013  * Returns zero on success.
1014  */
1015 static __always_inline int rhashtable_lookup_insert_key(
1016 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
1017 	const struct rhashtable_params params)
1018 {
1019 	void *ret;
1020 
1021 	BUG_ON(!ht->p.obj_hashfn || !key);
1022 
1023 	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
1024 	if (IS_ERR(ret))
1025 		return PTR_ERR(ret);
1026 
1027 	return ret == NULL ? 0 : -EEXIST;
1028 }
1029 
1030 /**
1031  * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
1032  * @ht:		hash table
1033  * @key:	key
1034  * @obj:	pointer to hash head inside object
1035  * @params:	hash table parameters
1036  *
1037  * Just like rhashtable_lookup_insert_key(), but this function returns the
1038  * object if it exists, NULL if it does not and the insertion was successful,
1039  * and an ERR_PTR otherwise.
1040  */
1041 static __always_inline void *rhashtable_lookup_get_insert_key(
1042 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
1043 	const struct rhashtable_params params)
1044 {
1045 	BUG_ON(!ht->p.obj_hashfn || !key);
1046 
1047 	return __rhashtable_insert_fast(ht, key, obj, params, false);
1048 }
1049 
1050 /* Internal function, please use rhashtable_remove_fast() instead */
1051 static __always_inline int __rhashtable_remove_fast_one(
1052 	struct rhashtable *ht, struct bucket_table *tbl,
1053 	struct rhash_head *obj, const struct rhashtable_params params,
1054 	bool rhlist)
1055 {
1056 	struct rhash_lock_head __rcu **bkt;
1057 	struct rhash_head __rcu **pprev;
1058 	struct rhash_head *he;
1059 	unsigned long flags;
1060 	unsigned int hash;
1061 	int err = -ENOENT;
1062 
1063 	hash = rht_head_hashfn(ht, tbl, obj, params);
1064 	bkt = rht_bucket_var(tbl, hash);
1065 	if (!bkt)
1066 		return -ENOENT;
1067 	pprev = NULL;
1068 	flags = rht_lock(tbl, bkt);
1069 
1070 	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
1071 		struct rhlist_head *list;
1072 
1073 		list = container_of(he, struct rhlist_head, rhead);
1074 
1075 		if (he != obj) {
1076 			struct rhlist_head __rcu **lpprev;
1077 
1078 			pprev = &he->next;
1079 
1080 			if (!rhlist)
1081 				continue;
1082 
1083 			do {
1084 				lpprev = &list->next;
1085 				list = rht_dereference_bucket(list->next,
1086 							      tbl, hash);
1087 			} while (list && obj != &list->rhead);
1088 
1089 			if (!list)
1090 				continue;
1091 
1092 			list = rht_dereference_bucket(list->next, tbl, hash);
1093 			RCU_INIT_POINTER(*lpprev, list);
1094 			err = 0;
1095 			break;
1096 		}
1097 
1098 		obj = rht_dereference_bucket(obj->next, tbl, hash);
1099 		err = 1;
1100 
1101 		if (rhlist) {
1102 			list = rht_dereference_bucket(list->next, tbl, hash);
1103 			if (list) {
1104 				RCU_INIT_POINTER(list->rhead.next, obj);
1105 				obj = &list->rhead;
1106 				err = 0;
1107 			}
1108 		}
1109 
1110 		if (pprev) {
1111 			rcu_assign_pointer(*pprev, obj);
1112 			rht_unlock(tbl, bkt, flags);
1113 		} else {
1114 			rht_assign_unlock(tbl, bkt, obj, flags);
1115 		}
1116 		goto unlocked;
1117 	}
1118 
1119 	rht_unlock(tbl, bkt, flags);
1120 unlocked:
1121 	if (err > 0) {
1122 		atomic_dec(&ht->nelems);
1123 		if (unlikely(ht->p.automatic_shrinking &&
1124 			     rht_shrink_below_30(ht, tbl)))
1125 			schedule_work(&ht->run_work);
1126 		err = 0;
1127 	}
1128 
1129 	return err;
1130 }
1131 
1132 /* Internal function, please use rhashtable_remove_fast() instead */
1133 static __always_inline int __rhashtable_remove_fast(
1134 	struct rhashtable *ht, struct rhash_head *obj,
1135 	const struct rhashtable_params params, bool rhlist)
1136 {
1137 	struct bucket_table *tbl;
1138 	int err;
1139 
1140 	rcu_read_lock();
1141 
1142 	tbl = rht_dereference_rcu(ht->tbl, ht);
1143 
1144 	/* Because we have already taken (and released) the bucket
1145 	 * lock in old_tbl, if we find that future_tbl is not yet
1146 	 * visible then that guarantees that the entry is still in
1147 	 * the old tbl if it exists.
1148 	 */
1149 	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
1150 						   rhlist)) &&
1151 	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
1152 		;
1153 
1154 	rcu_read_unlock();
1155 
1156 	return err;
1157 }
1158 
1159 /**
1160  * rhashtable_remove_fast - remove object from hash table
1161  * @ht:		hash table
1162  * @obj:	pointer to hash head inside object
1163  * @params:	hash table parameters
1164  *
1165  * Since the hash chain is singly linked, the removal operation needs to
1166  * walk the bucket chain upon removal. The removal operation is thus
1167  * considerably slower if the hash table is not correctly sized.
1168  *
1169  * Will automatically shrink the table if permitted when residency drops
1170  * below 30%.
1171  *
1172  * Returns zero on success, -ENOENT if the entry could not be found.
1173  */
1174 static __always_inline int rhashtable_remove_fast(
1175 	struct rhashtable *ht, struct rhash_head *obj,
1176 	const struct rhashtable_params params)
1177 {
1178 	return __rhashtable_remove_fast(ht, obj, params, false);
1179 }
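
/*
 * Removal sketch (hypothetical example_obj with an additional struct
 * rcu_head 'rcu' member): once rhashtable_remove_fast() succeeds the object
 * is unreachable from the table, but concurrent RCU readers may still hold
 * it, so the memory is typically reclaimed with kfree_rcu() rather than an
 * immediate kfree().
 *
 *	if (rhashtable_remove_fast(&my_ht, &obj->node, example_params) == 0)
 *		kfree_rcu(obj, rcu);
 */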
1180 
1181 /**
1182  * rhltable_remove - remove object from hash list table
1183  * @hlt:	hash list table
1184  * @list:	pointer to hash list head inside object
1185  * @params:	hash table parameters
1186  *
1187  * Since the hash chain is singly linked, the removal operation needs to
1188  * walk the bucket chain upon removal. The removal operation is thus
1189  * considerably slower if the hash table is not correctly sized.
1190  *
1191  * Will automatically shrink the table if permitted when residency drops
1192  * below 30%.
1193  *
1194  * Returns zero on success, -ENOENT if the entry could not be found.
1195  */
1196 static __always_inline int rhltable_remove(
1197 	struct rhltable *hlt, struct rhlist_head *list,
1198 	const struct rhashtable_params params)
1199 {
1200 	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
1201 }
1202 
1203 /* Internal function, please use rhashtable_replace_fast() instead */
1204 static __always_inline int __rhashtable_replace_fast(
1205 	struct rhashtable *ht, struct bucket_table *tbl,
1206 	struct rhash_head *obj_old, struct rhash_head *obj_new,
1207 	const struct rhashtable_params params)
1208 {
1209 	struct rhash_lock_head __rcu **bkt;
1210 	struct rhash_head __rcu **pprev;
1211 	struct rhash_head *he;
1212 	unsigned long flags;
1213 	unsigned int hash;
1214 	int err = -ENOENT;
1215 
1216 	/* Minimally, the old and new objects must have the same hash
1217 	 * (which should mean identifiers are the same).
1218 	 */
1219 	hash = rht_head_hashfn(ht, tbl, obj_old, params);
1220 	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
1221 		return -EINVAL;
1222 
1223 	bkt = rht_bucket_var(tbl, hash);
1224 	if (!bkt)
1225 		return -ENOENT;
1226 
1227 	pprev = NULL;
1228 	flags = rht_lock(tbl, bkt);
1229 
1230 	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
1231 		if (he != obj_old) {
1232 			pprev = &he->next;
1233 			continue;
1234 		}
1235 
1236 		rcu_assign_pointer(obj_new->next, obj_old->next);
1237 		if (pprev) {
1238 			rcu_assign_pointer(*pprev, obj_new);
1239 			rht_unlock(tbl, bkt, flags);
1240 		} else {
1241 			rht_assign_unlock(tbl, bkt, obj_new, flags);
1242 		}
1243 		err = 0;
1244 		goto unlocked;
1245 	}
1246 
1247 	rht_unlock(tbl, bkt, flags);
1248 
1249 unlocked:
1250 	return err;
1251 }
1252 
1253 /**
1254  * rhashtable_replace_fast - replace an object in hash table
1255  * @ht:		hash table
1256  * @obj_old:	pointer to hash head inside object being replaced
1257  * @obj_new:	pointer to hash head inside object which is new
1258  * @params:	hash table parameters
1259  *
1260  * Replacing an object doesn't affect the number of elements in the hash table
1261  * or bucket, so we don't need to worry about shrinking or expanding the
1262  * table here.
1263  *
1264  * Returns zero on success, -ENOENT if the entry could not be found,
1265  * -EINVAL if hash is not the same for the old and new objects.
1266  */
1267 static __always_inline int rhashtable_replace_fast(
1268 	struct rhashtable *ht, struct rhash_head *obj_old,
1269 	struct rhash_head *obj_new,
1270 	const struct rhashtable_params params)
1271 {
1272 	struct bucket_table *tbl;
1273 	int err;
1274 
1275 	rcu_read_lock();
1276 
1277 	tbl = rht_dereference_rcu(ht->tbl, ht);
1278 
1279 	/* Because we have already taken (and released) the bucket
1280 	 * lock in old_tbl, if we find that future_tbl is not yet
1281 	 * visible then that guarantees that the entry is still in
1282 	 * the old tbl if it exists.
1283 	 */
1284 	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
1285 						obj_new, params)) &&
1286 	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
1287 		;
1288 
1289 	rcu_read_unlock();
1290 
1291 	return err;
1292 }
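
/*
 * Replacement sketch (hypothetical example_obj pointers 'old' and 'new',
 * each with an assumed struct rcu_head 'rcu' member): for a fixed-key table
 * the old and new objects must carry the same key so that they hash to the
 * same bucket, and the old object must not be freed until a grace period
 * has elapsed.
 *
 *	int err;
 *
 *	new->key = old->key;
 *	err = rhashtable_replace_fast(&my_ht, &old->node, &new->node,
 *				      example_params);
 *	if (!err)
 *		kfree_rcu(old, rcu);
 */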
1293 
1294 /**
1295  * rhltable_walk_enter - Initialise an iterator
1296  * @hlt:	Table to walk over
1297  * @iter:	Hash table Iterator
1298  *
1299  * This function prepares a hash table walk.
1300  *
1301  * Note that if you restart a walk after rhashtable_walk_stop you
1302  * may see the same object twice.  Also, you may miss objects if
1303  * there are removals in between rhashtable_walk_stop and the next
1304  * call to rhashtable_walk_start.
1305  *
1306  * For a completely stable walk you should construct your own data
1307  * structure outside the hash table.
1308  *
1309  * This function may be called from any process context, including
1310  * non-preemptable context, but cannot be called from softirq or
1311  * hardirq context.
1312  *
1313  * You must call rhashtable_walk_exit after this function returns.
1314  */
1315 static inline void rhltable_walk_enter(struct rhltable *hlt,
1316 				       struct rhashtable_iter *iter)
1317 {
1318 	rhashtable_walk_enter(&hlt->ht, iter);
1319 }
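
/*
 * Walk sketch (hypothetical my_ht table and process() helper; the walker
 * API is identical for rhashtable and rhltable users): rhashtable_walk_next()
 * returns ERR_PTR(-EAGAIN) when a resize interrupts the walk, in which case
 * the caller simply continues and may see some objects twice.
 *
 *	struct rhashtable_iter iter;
 *	struct example_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		process(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */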
1320 
1321 /**
1322  * rhltable_free_and_destroy - free elements and destroy hash list table
1323  * @hlt:	the hash list table to destroy
1324  * @free_fn:	callback to release resources of element
1325  * @arg:	pointer passed to free_fn
1326  *
1327  * See documentation for rhashtable_free_and_destroy.
1328  */
1329 static inline void rhltable_free_and_destroy(struct rhltable *hlt,
1330 					     void (*free_fn)(void *ptr,
1331 							     void *arg),
1332 					     void *arg)
1333 {
1334 	rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
1335 }
1336 
1337 static inline void rhltable_destroy(struct rhltable *hlt)
1338 {
1339 	rhltable_free_and_destroy(hlt, NULL, NULL);
1340 }
1341 
1342 #endif /* _LINUX_RHASHTABLE_H */
1343