/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
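
/*
 * A minimal sketch of the encoding above (illustrative, not part of
 * this API). Given a nulls_base B and a first-element hash H:
 *
 *	unsigned long marker = (((unsigned long)(B + H)) << 1) | 1UL;
 *
 * which is exactly NULLS_MARKER(B + H) as used by rht_marker() below;
 * rht_is_a_nulls() tests bit 0 and rht_get_nulls_value() shifts right
 * by one to recover B + H.
 */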

/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

struct rhash_head {
	struct rhash_head __rcu		*next;
};

struct rhlist_head {
	struct rhash_head		rhead;
	struct rhlist_head __rcu	*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @nulls_base: Base value to generate nulls marker
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	u16			nelem_hint;
	u16			key_len;
	u16			key_offset;
	u16			head_offset;
	unsigned int		max_size;
	u16			min_size;
	bool			automatic_shrinking;
	u8			locks_mul;
	u32			nulls_base;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
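
/*
 * A minimal parameter block for a table keyed by a u32, as a sketch;
 * the names test_obj and test_params are illustrative only (they mirror
 * the style of lib/test_rhashtable.c), and the examples further down
 * reuse them:
 *
 *	struct test_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.automatic_shrinking = true,
 *	};
 */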

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @key_len: Key length for hashfn
 * @max_elems: Maximum number of elements in table
 * @p: Configuration parameters
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 * @nelems: Number of elements in table
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	unsigned int			key_len;
	unsigned int			max_elems;
	struct rhashtable_params	p;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
	atomic_t			nelems;
};

/**
 * struct rhltable - Hash table with duplicate objects in a list
 * @ht: Underlying rhashtable
 */
struct rhltable {
	struct rhashtable ht;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator
 * @ht: Table to iterate through
 * @p: Current pointer
 * @list: Current hash list pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 * @end_of_table: Walk has reached the end of the table
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhlist_head *list;
	struct rhashtable_walker walker;
	unsigned int slot;
	unsigned int skip;
	bool end_of_table;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}
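
/*
 * For example, with the hypothetical test_params above (constant
 * key_len == 4, no .hashfn set), the chain above folds at compile time
 * to jhash2(key, 1, hash_rnd); a 6-byte key would instead take the
 * jhash() branch because 6 is not a multiple of sizeof(u32).
 */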

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
		(!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
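
/*
 * A sketch of that ordering rule as a resize path would apply it
 * (illustrative only; old_tbl/new_tbl and the hashes are assumptions,
 * and lib/rhashtable.c is the authoritative implementation):
 *
 *	old_lock = rht_bucket_lock(old_tbl, old_hash);
 *	new_lock = rht_bucket_lock(new_tbl, new_hash);
 *
 *	spin_lock_bh(old_lock);
 *	spin_lock_nested(new_lock, SINGLE_DEPTH_NESTING);
 *	... relink entries from the old bucket into the new one ...
 *	spin_unlock(new_lock);
 *	spin_unlock_bh(old_lock);
 */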

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
		  const struct rhashtable_params *params);

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
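
/*
 * A typical walk over the whole table, as a sketch reusing the
 * hypothetical test_obj from above. The -EAGAIN handling follows the
 * walker contract: a concurrent resize may force the walk to restart
 * from the beginning of the new table.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %u\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */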

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
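
/*
 * Example teardown that releases any elements still in the table
 * (sketch; free_obj is a hypothetical callback):
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */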

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member)	\
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash),	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
					tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
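
/*
 * Example lookup under the RCU read lock, as a sketch with the
 * hypothetical test_obj/test_params from above:
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	if (obj)
 *		pr_info("found key %u\n", obj->key);
 *	rcu_read_unlock();
 */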

/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key.  All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
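
/*
 * The returned list is normally consumed with rhl_for_each_entry_rcu().
 * Sketch, assuming a hypothetical object type that embeds a struct
 * rhlist_head named list_node:
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_obj_rhl *obj;
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		pr_info("duplicate for key %u\n", obj->key);
 *	rcu_read_unlock();
 */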

/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if
 * there is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
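
/*
 * Example insertion with error handling (sketch, reusing the
 * hypothetical test_obj/test_params):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 *
 * An -EEXIST return means an entry with the same key already exists.
 */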

/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
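
/*
 * Example removal paired with RCU-deferred freeing (sketch; assumes the
 * hypothetical test_obj also embeds a struct rcu_head named rcu):
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);
 */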

/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptible context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */