xref: /linux/lib/rhashtable.c (revision 7e1e77636e36075ebf118298855268468f1028e8)
1*7e1e7763SThomas Graf /*
2*7e1e7763SThomas Graf  * Resizable, Scalable, Concurrent Hash Table
3*7e1e7763SThomas Graf  *
4*7e1e7763SThomas Graf  * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5*7e1e7763SThomas Graf  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6*7e1e7763SThomas Graf  *
7*7e1e7763SThomas Graf  * Based on the following paper:
8*7e1e7763SThomas Graf  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9*7e1e7763SThomas Graf  *
10*7e1e7763SThomas Graf  * Code partially derived from nft_hash
11*7e1e7763SThomas Graf  *
12*7e1e7763SThomas Graf  * This program is free software; you can redistribute it and/or modify
13*7e1e7763SThomas Graf  * it under the terms of the GNU General Public License version 2 as
14*7e1e7763SThomas Graf  * published by the Free Software Foundation.
15*7e1e7763SThomas Graf  */
16*7e1e7763SThomas Graf 
17*7e1e7763SThomas Graf #include <linux/kernel.h>
18*7e1e7763SThomas Graf #include <linux/init.h>
19*7e1e7763SThomas Graf #include <linux/log2.h>
20*7e1e7763SThomas Graf #include <linux/slab.h>
21*7e1e7763SThomas Graf #include <linux/vmalloc.h>
22*7e1e7763SThomas Graf #include <linux/mm.h>
23*7e1e7763SThomas Graf #include <linux/hash.h>
24*7e1e7763SThomas Graf #include <linux/random.h>
25*7e1e7763SThomas Graf #include <linux/rhashtable.h>
27*7e1e7763SThomas Graf 
28*7e1e7763SThomas Graf #define HASH_DEFAULT_SIZE	64UL
29*7e1e7763SThomas Graf #define HASH_MIN_SIZE		4UL
30*7e1e7763SThomas Graf 
31*7e1e7763SThomas Graf #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
32*7e1e7763SThomas Graf 
33*7e1e7763SThomas Graf #ifdef CONFIG_PROVE_LOCKING
34*7e1e7763SThomas Graf int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
35*7e1e7763SThomas Graf {
36*7e1e7763SThomas Graf 	return ht->p.mutex_is_held();
37*7e1e7763SThomas Graf }
38*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
39*7e1e7763SThomas Graf #endif
40*7e1e7763SThomas Graf 
41*7e1e7763SThomas Graf /**
42*7e1e7763SThomas Graf  * rht_obj - cast hash head to outer object
43*7e1e7763SThomas Graf  * @ht:		hash table
44*7e1e7763SThomas Graf  * @he:		hashed node
45*7e1e7763SThomas Graf  */
46*7e1e7763SThomas Graf void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
47*7e1e7763SThomas Graf {
48*7e1e7763SThomas Graf 	return (void *) he - ht->p.head_offset;
49*7e1e7763SThomas Graf }
50*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rht_obj);
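
/* Put differently (struct my_obj is an illustrative name, not defined in this
 * file): with head_offset = offsetof(struct my_obj, node), rht_obj(ht, he) is
 * the open-coded equivalent of container_of(he, struct my_obj, node).
 */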
51*7e1e7763SThomas Graf 
52*7e1e7763SThomas Graf static u32 __hashfn(const struct rhashtable *ht, const void *key,
53*7e1e7763SThomas Graf 		      u32 len, u32 hsize)
54*7e1e7763SThomas Graf {
55*7e1e7763SThomas Graf 	u32 h;
56*7e1e7763SThomas Graf 
57*7e1e7763SThomas Graf 	h = ht->p.hashfn(key, len, ht->p.hash_rnd);
58*7e1e7763SThomas Graf 
59*7e1e7763SThomas Graf 	return h & (hsize - 1);
60*7e1e7763SThomas Graf }
61*7e1e7763SThomas Graf 
62*7e1e7763SThomas Graf /**
63*7e1e7763SThomas Graf  * rhashtable_hashfn - compute hash for key of given length
64*7e1e7763SThomas Graf  * @ht:		hash table to compute for
65*7e1e7763SThomas Graf  * @key:	pointer to key
66*7e1e7763SThomas Graf  * @len:	length of key
67*7e1e7763SThomas Graf  *
68*7e1e7763SThomas Graf  * Computes the hash value using the hash function provided in the 'hashfn'
69*7e1e7763SThomas Graf  * member of struct rhashtable_params. The returned value is guaranteed to be
70*7e1e7763SThomas Graf  * smaller than the number of buckets in the hash table.
71*7e1e7763SThomas Graf  */
72*7e1e7763SThomas Graf u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
73*7e1e7763SThomas Graf {
74*7e1e7763SThomas Graf 	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
75*7e1e7763SThomas Graf 
76*7e1e7763SThomas Graf 	return __hashfn(ht, key, len, tbl->size);
77*7e1e7763SThomas Graf }
78*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_hashfn);
79*7e1e7763SThomas Graf 
80*7e1e7763SThomas Graf static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
81*7e1e7763SThomas Graf {
82*7e1e7763SThomas Graf 	if (unlikely(!ht->p.key_len)) {
83*7e1e7763SThomas Graf 		u32 h;
84*7e1e7763SThomas Graf 
85*7e1e7763SThomas Graf 		h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
86*7e1e7763SThomas Graf 
87*7e1e7763SThomas Graf 		return h & (hsize - 1);
88*7e1e7763SThomas Graf 	}
89*7e1e7763SThomas Graf 
90*7e1e7763SThomas Graf 	return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
91*7e1e7763SThomas Graf }
92*7e1e7763SThomas Graf 
93*7e1e7763SThomas Graf /**
94*7e1e7763SThomas Graf  * rhashtable_obj_hashfn - compute hash for hashed object
95*7e1e7763SThomas Graf  * @ht:		hash table to compute for
96*7e1e7763SThomas Graf  * @ptr:	pointer to hashed object
97*7e1e7763SThomas Graf  *
98*7e1e7763SThomas Graf  * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
99*7e1e7763SThomas Graf  * on whether the hash table is set up to work with
100*7e1e7763SThomas Graf  * a fixed length key. The returned value is guaranteed to be smaller than
101*7e1e7763SThomas Graf  * the number of buckets in the hash table.
102*7e1e7763SThomas Graf  */
103*7e1e7763SThomas Graf u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
104*7e1e7763SThomas Graf {
105*7e1e7763SThomas Graf 	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
106*7e1e7763SThomas Graf 
107*7e1e7763SThomas Graf 	return obj_hashfn(ht, ptr, tbl->size);
108*7e1e7763SThomas Graf }
109*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
110*7e1e7763SThomas Graf 
111*7e1e7763SThomas Graf static u32 head_hashfn(const struct rhashtable *ht,
112*7e1e7763SThomas Graf 		       const struct rhash_head *he, u32 hsize)
113*7e1e7763SThomas Graf {
114*7e1e7763SThomas Graf 	return obj_hashfn(ht, rht_obj(ht, he), hsize);
115*7e1e7763SThomas Graf }
116*7e1e7763SThomas Graf 
117*7e1e7763SThomas Graf static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
118*7e1e7763SThomas Graf {
119*7e1e7763SThomas Graf 	struct bucket_table *tbl;
120*7e1e7763SThomas Graf 	size_t size;
121*7e1e7763SThomas Graf 
122*7e1e7763SThomas Graf 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
123*7e1e7763SThomas Graf 	tbl = kzalloc(size, flags);
124*7e1e7763SThomas Graf 	if (tbl == NULL)
125*7e1e7763SThomas Graf 		tbl = vzalloc(size);
126*7e1e7763SThomas Graf 
127*7e1e7763SThomas Graf 	if (tbl == NULL)
128*7e1e7763SThomas Graf 		return NULL;
129*7e1e7763SThomas Graf 
130*7e1e7763SThomas Graf 	tbl->size = nbuckets;
131*7e1e7763SThomas Graf 
132*7e1e7763SThomas Graf 	return tbl;
133*7e1e7763SThomas Graf }
134*7e1e7763SThomas Graf 
135*7e1e7763SThomas Graf static void bucket_table_free(const struct bucket_table *tbl)
136*7e1e7763SThomas Graf {
137*7e1e7763SThomas Graf 	kvfree(tbl);
138*7e1e7763SThomas Graf }
139*7e1e7763SThomas Graf 
140*7e1e7763SThomas Graf /**
141*7e1e7763SThomas Graf  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
142*7e1e7763SThomas Graf  * @ht:		hash table
143*7e1e7763SThomas Graf  * @new_size:	new table size
144*7e1e7763SThomas Graf  */
145*7e1e7763SThomas Graf bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
146*7e1e7763SThomas Graf {
147*7e1e7763SThomas Graf 	/* Expand table when exceeding 75% load */
148*7e1e7763SThomas Graf 	return ht->nelems > (new_size / 4 * 3);
149*7e1e7763SThomas Graf }
150*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rht_grow_above_75);
151*7e1e7763SThomas Graf 
152*7e1e7763SThomas Graf /**
153*7e1e7763SThomas Graf  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
154*7e1e7763SThomas Graf  * @ht:		hash table
155*7e1e7763SThomas Graf  * @new_size:	new table size
156*7e1e7763SThomas Graf  */
157*7e1e7763SThomas Graf bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
158*7e1e7763SThomas Graf {
159*7e1e7763SThomas Graf 	/* Shrink table beneath 30% load */
160*7e1e7763SThomas Graf 	return ht->nelems < (new_size * 3 / 10);
161*7e1e7763SThomas Graf }
162*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rht_shrink_below_30);
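
/* Sketch of how these heuristics are typically wired up (illustrative;
 * struct my_obj and my_mutex_is_held are assumed caller-side names, not
 * symbols defined by this file). The self-test at the bottom of this file
 * uses the same wiring.
 *
 *	struct rhashtable_params params = {
 *		.head_offset	 = offsetof(struct my_obj, node),
 *		.key_offset	 = offsetof(struct my_obj, key),
 *		.key_len	 = sizeof(u32),
 *		.hashfn		 = arch_fast_hash,
 *		.mutex_is_held	 = &my_mutex_is_held,
 *		.grow_decision	 = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 */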
163*7e1e7763SThomas Graf 
164*7e1e7763SThomas Graf static void hashtable_chain_unzip(const struct rhashtable *ht,
165*7e1e7763SThomas Graf 				  const struct bucket_table *new_tbl,
166*7e1e7763SThomas Graf 				  struct bucket_table *old_tbl, size_t n)
167*7e1e7763SThomas Graf {
168*7e1e7763SThomas Graf 	struct rhash_head *he, *p, *next;
169*7e1e7763SThomas Graf 	unsigned int h;
170*7e1e7763SThomas Graf 
171*7e1e7763SThomas Graf 	/* Old bucket empty, no work needed. */
172*7e1e7763SThomas Graf 	p = rht_dereference(old_tbl->buckets[n], ht);
173*7e1e7763SThomas Graf 	if (!p)
174*7e1e7763SThomas Graf 		return;
175*7e1e7763SThomas Graf 
176*7e1e7763SThomas Graf 	/* Advance through the old bucket chain, one node at a time, until
177*7e1e7763SThomas Graf 	 * reaching a node that hashes to a different new bucket than its
178*7e1e7763SThomas Graf 	 * predecessor. Call that predecessor p.
179*7e1e7763SThomas Graf 	 */
180*7e1e7763SThomas Graf 	h = head_hashfn(ht, p, new_tbl->size);
181*7e1e7763SThomas Graf 	rht_for_each(he, p->next, ht) {
182*7e1e7763SThomas Graf 		if (head_hashfn(ht, he, new_tbl->size) != h)
183*7e1e7763SThomas Graf 			break;
184*7e1e7763SThomas Graf 		p = he;
185*7e1e7763SThomas Graf 	}
186*7e1e7763SThomas Graf 	RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
187*7e1e7763SThomas Graf 
188*7e1e7763SThomas Graf 	/* Find the subsequent node which does hash to the same
189*7e1e7763SThomas Graf 	 * bucket as node P, or NULL if no such node exists.
190*7e1e7763SThomas Graf 	 */
191*7e1e7763SThomas Graf 	next = NULL;
192*7e1e7763SThomas Graf 	if (he) {
193*7e1e7763SThomas Graf 		rht_for_each(he, he->next, ht) {
194*7e1e7763SThomas Graf 			if (head_hashfn(ht, he, new_tbl->size) == h) {
195*7e1e7763SThomas Graf 				next = he;
196*7e1e7763SThomas Graf 				break;
197*7e1e7763SThomas Graf 			}
198*7e1e7763SThomas Graf 		}
199*7e1e7763SThomas Graf 	}
200*7e1e7763SThomas Graf 
201*7e1e7763SThomas Graf 	/* Set p's next pointer to that subsequent node pointer,
202*7e1e7763SThomas Graf 	 * bypassing the nodes which do not hash to p's bucket
203*7e1e7763SThomas Graf 	 */
204*7e1e7763SThomas Graf 	RCU_INIT_POINTER(p->next, next);
205*7e1e7763SThomas Graf }
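
/* Worked example (illustrative chain): when expanding from 4 to 8 buckets,
 * old bucket 2 feeds new buckets 2 and 6. Given the zipped chain
 * A(->2) -> B(->2) -> C(->6) -> D(->2), the expand pass points new bucket 2
 * at A and new bucket 6 at C; successive unzip passes then set B->next = D
 * and C->next = NULL, while advancing old bucket 2 to C, then D, then NULL,
 * at which point the two chains are fully separated.
 */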
206*7e1e7763SThomas Graf 
207*7e1e7763SThomas Graf /**
208*7e1e7763SThomas Graf  * rhashtable_expand - Expand hash table while allowing concurrent lookups
209*7e1e7763SThomas Graf  * @ht:		the hash table to expand
210*7e1e7763SThomas Graf  * @flags:	allocation flags
211*7e1e7763SThomas Graf  *
212*7e1e7763SThomas Graf  * A secondary bucket array is allocated and the hash entries are migrated
213*7e1e7763SThomas Graf  * while keeping them on both lists until the end of the RCU grace period.
214*7e1e7763SThomas Graf  *
215*7e1e7763SThomas Graf  * This function may only be called in a context where it is safe to call
216*7e1e7763SThomas Graf  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
217*7e1e7763SThomas Graf  *
218*7e1e7763SThomas Graf  * The caller must ensure that no concurrent table mutations take place.
219*7e1e7763SThomas Graf  * It is however valid to have concurrent lookups if they are RCU protected.
220*7e1e7763SThomas Graf  */
221*7e1e7763SThomas Graf int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
222*7e1e7763SThomas Graf {
223*7e1e7763SThomas Graf 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
224*7e1e7763SThomas Graf 	struct rhash_head *he;
225*7e1e7763SThomas Graf 	unsigned int i, h;
226*7e1e7763SThomas Graf 	bool complete;
227*7e1e7763SThomas Graf 
228*7e1e7763SThomas Graf 	ASSERT_RHT_MUTEX(ht);
229*7e1e7763SThomas Graf 
230*7e1e7763SThomas Graf 	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
231*7e1e7763SThomas Graf 		return 0;
232*7e1e7763SThomas Graf 
233*7e1e7763SThomas Graf 	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
234*7e1e7763SThomas Graf 	if (new_tbl == NULL)
235*7e1e7763SThomas Graf 		return -ENOMEM;
236*7e1e7763SThomas Graf 
237*7e1e7763SThomas Graf 	ht->shift++;
238*7e1e7763SThomas Graf 
239*7e1e7763SThomas Graf 	/* For each new bucket, search the corresponding old bucket
240*7e1e7763SThomas Graf 	 * for the first entry that hashes to the new bucket, and
241*7e1e7763SThomas Graf 	 * link the new bucket to that entry. Since all the entries
242*7e1e7763SThomas Graf 	 * which will end up in the new bucket appear in the same
243*7e1e7763SThomas Graf 	 * old bucket, this constructs an entirely valid new hash
244*7e1e7763SThomas Graf 	 * table, but with multiple buckets "zipped" together into a
245*7e1e7763SThomas Graf 	 * single imprecise chain.
246*7e1e7763SThomas Graf 	 */
247*7e1e7763SThomas Graf 	for (i = 0; i < new_tbl->size; i++) {
248*7e1e7763SThomas Graf 		h = i & (old_tbl->size - 1);
249*7e1e7763SThomas Graf 		rht_for_each(he, old_tbl->buckets[h], ht) {
250*7e1e7763SThomas Graf 			if (head_hashfn(ht, he, new_tbl->size) == i) {
251*7e1e7763SThomas Graf 				RCU_INIT_POINTER(new_tbl->buckets[i], he);
252*7e1e7763SThomas Graf 				break;
253*7e1e7763SThomas Graf 			}
254*7e1e7763SThomas Graf 		}
255*7e1e7763SThomas Graf 	}
256*7e1e7763SThomas Graf 
257*7e1e7763SThomas Graf 	/* Publish the new table pointer. Lookups may now traverse
258*7e1e7763SThomas Graf 	 * the new table, but they will not benefit from any
259*7e1e7763SThomas Graf 	 * additional efficiency until later steps unzip the buckets.
260*7e1e7763SThomas Graf 	 */
261*7e1e7763SThomas Graf 	rcu_assign_pointer(ht->tbl, new_tbl);
262*7e1e7763SThomas Graf 
263*7e1e7763SThomas Graf 	/* Unzip interleaved hash chains */
264*7e1e7763SThomas Graf 	do {
265*7e1e7763SThomas Graf 		/* Wait for readers. All new readers will see the new
266*7e1e7763SThomas Graf 		 * table, and thus no references to the old table will
267*7e1e7763SThomas Graf 		 * remain.
268*7e1e7763SThomas Graf 		 */
269*7e1e7763SThomas Graf 		synchronize_rcu();
270*7e1e7763SThomas Graf 
271*7e1e7763SThomas Graf 		/* For each bucket in the old table (each of which
272*7e1e7763SThomas Graf 		 * contains items from multiple buckets of the new
273*7e1e7763SThomas Graf 		 * table): ...
274*7e1e7763SThomas Graf 		 */
275*7e1e7763SThomas Graf 		complete = true;
276*7e1e7763SThomas Graf 		for (i = 0; i < old_tbl->size; i++) {
277*7e1e7763SThomas Graf 			hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
278*7e1e7763SThomas Graf 			if (old_tbl->buckets[i] != NULL)
279*7e1e7763SThomas Graf 				complete = false;
280*7e1e7763SThomas Graf 		}
281*7e1e7763SThomas Graf 	} while (!complete);
282*7e1e7763SThomas Graf 
283*7e1e7763SThomas Graf 	bucket_table_free(old_tbl);
284*7e1e7763SThomas Graf 	return 0;
285*7e1e7763SThomas Graf }
286*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_expand);
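
/* Usage sketch (my_ht and my_ht_mutex are assumed caller-side names): an
 * explicit resize performed by a writer. The mutex stands in for whatever
 * serializes mutations on this table; RCU-protected lookups may continue
 * in parallel while the expansion runs.
 *
 *	mutex_lock(&my_ht_mutex);
 *	err = rhashtable_expand(&my_ht, GFP_KERNEL);
 *	mutex_unlock(&my_ht_mutex);
 *	if (err)
 *		pr_warn("ht expand failed: %d\n", err);
 *
 * On -ENOMEM the table simply keeps its current size.
 */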
287*7e1e7763SThomas Graf 
288*7e1e7763SThomas Graf /**
289*7e1e7763SThomas Graf  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
290*7e1e7763SThomas Graf  * @ht:		the hash table to shrink
291*7e1e7763SThomas Graf  * @flags:	allocation flags
292*7e1e7763SThomas Graf  *
293*7e1e7763SThomas Graf  * This function may only be called in a context where it is safe to call
294*7e1e7763SThomas Graf  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
295*7e1e7763SThomas Graf  *
296*7e1e7763SThomas Graf  * The caller must ensure that no concurrent table mutations take place.
297*7e1e7763SThomas Graf  * It is however valid to have concurrent lookups if they are RCU protected.
298*7e1e7763SThomas Graf  */
299*7e1e7763SThomas Graf int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
300*7e1e7763SThomas Graf {
301*7e1e7763SThomas Graf 	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
302*7e1e7763SThomas Graf 	struct rhash_head __rcu **pprev;
303*7e1e7763SThomas Graf 	unsigned int i;
304*7e1e7763SThomas Graf 
305*7e1e7763SThomas Graf 	ASSERT_RHT_MUTEX(ht);
306*7e1e7763SThomas Graf 
307*7e1e7763SThomas Graf 	if (tbl->size <= HASH_MIN_SIZE)
308*7e1e7763SThomas Graf 		return 0;
309*7e1e7763SThomas Graf 
310*7e1e7763SThomas Graf 	ntbl = bucket_table_alloc(tbl->size / 2, flags);
311*7e1e7763SThomas Graf 	if (ntbl == NULL)
312*7e1e7763SThomas Graf 		return -ENOMEM;
313*7e1e7763SThomas Graf 
314*7e1e7763SThomas Graf 	ht->shift--;
315*7e1e7763SThomas Graf 
316*7e1e7763SThomas Graf 	/* Link each bucket in the new table to the first bucket
317*7e1e7763SThomas Graf 	 * in the old table that contains entries which will hash
318*7e1e7763SThomas Graf 	 * to the new bucket.
319*7e1e7763SThomas Graf 	 */
320*7e1e7763SThomas Graf 	for (i = 0; i < ntbl->size; i++) {
321*7e1e7763SThomas Graf 		ntbl->buckets[i] = tbl->buckets[i];
322*7e1e7763SThomas Graf 
323*7e1e7763SThomas Graf 		/* Walk to the end of the new bucket chain and link it to
324*7e1e7763SThomas Graf 		 * the second old bucket whose entries also hash to this
325*7e1e7763SThomas Graf 		 * new bucket.
326*7e1e7763SThomas Graf 		 */
327*7e1e7763SThomas Graf 		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
328*7e1e7763SThomas Graf 		     pprev = &rht_dereference(*pprev, ht)->next)
329*7e1e7763SThomas Graf 			;
330*7e1e7763SThomas Graf 		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
331*7e1e7763SThomas Graf 	}
332*7e1e7763SThomas Graf 
333*7e1e7763SThomas Graf 	/* Publish the new, valid hash table */
334*7e1e7763SThomas Graf 	rcu_assign_pointer(ht->tbl, ntbl);
335*7e1e7763SThomas Graf 
336*7e1e7763SThomas Graf 	/* Wait for readers. No new readers will have references to the
337*7e1e7763SThomas Graf 	 * old hash table.
338*7e1e7763SThomas Graf 	 */
339*7e1e7763SThomas Graf 	synchronize_rcu();
340*7e1e7763SThomas Graf 
341*7e1e7763SThomas Graf 	bucket_table_free(tbl);
342*7e1e7763SThomas Graf 
343*7e1e7763SThomas Graf 	return 0;
344*7e1e7763SThomas Graf }
345*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_shrink);
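
/* Worked example of the relinking above (illustrative sizes): shrinking from
 * tbl->size = 8 to ntbl->size = 4, new bucket 1 starts with old bucket 1 and
 * its chain tail is then linked to old bucket 1 + 4 = 5, because entries of
 * both old buckets hash to new bucket 1 once the mask is halved.
 */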
346*7e1e7763SThomas Graf 
347*7e1e7763SThomas Graf /**
348*7e1e7763SThomas Graf  * rhashtable_insert - insert object into hash table
349*7e1e7763SThomas Graf  * @ht:		hash table
350*7e1e7763SThomas Graf  * @obj:	pointer to hash head inside object
351*7e1e7763SThomas Graf  * @flags:	allocation flags (table expansion)
352*7e1e7763SThomas Graf  *
353*7e1e7763SThomas Graf  * Will automatically grow the table via rhashtable_expand() if the
354*7e1e7763SThomas Graf  * grow_decision function specified at rhashtable_init() returns true.
355*7e1e7763SThomas Graf  *
356*7e1e7763SThomas Graf  * The caller must ensure that no concurrent table mutations occur. It is
357*7e1e7763SThomas Graf  * however valid to have concurrent lookups if they are RCU protected.
358*7e1e7763SThomas Graf  */
359*7e1e7763SThomas Graf void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
360*7e1e7763SThomas Graf 		       gfp_t flags)
361*7e1e7763SThomas Graf {
362*7e1e7763SThomas Graf 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
363*7e1e7763SThomas Graf 	u32 hash;
364*7e1e7763SThomas Graf 
365*7e1e7763SThomas Graf 	ASSERT_RHT_MUTEX(ht);
366*7e1e7763SThomas Graf 
367*7e1e7763SThomas Graf 	hash = head_hashfn(ht, obj, tbl->size);
368*7e1e7763SThomas Graf 	RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
369*7e1e7763SThomas Graf 	rcu_assign_pointer(tbl->buckets[hash], obj);
370*7e1e7763SThomas Graf 	ht->nelems++;
371*7e1e7763SThomas Graf 
372*7e1e7763SThomas Graf 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
373*7e1e7763SThomas Graf 		rhashtable_expand(ht, flags);
374*7e1e7763SThomas Graf }
375*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_insert);
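
/* Insertion sketch (illustrative; my_obj, my_ht and my_ht_mutex are assumed
 * caller-side names). The mutex provides the required writer exclusion; if
 * grow_decision was set to rht_grow_above_75(), crossing 75% load makes the
 * insert expand the table on the caller's behalf.
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = key;
 *	mutex_lock(&my_ht_mutex);
 *	rhashtable_insert(&my_ht, &obj->node, GFP_KERNEL);
 *	mutex_unlock(&my_ht_mutex);
 */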
376*7e1e7763SThomas Graf 
377*7e1e7763SThomas Graf /**
378*7e1e7763SThomas Graf  * rhashtable_remove_pprev - remove object from hash table given previous element
379*7e1e7763SThomas Graf  * @ht:		hash table
380*7e1e7763SThomas Graf  * @obj:	pointer to hash head inside object
381*7e1e7763SThomas Graf  * @pprev:	pointer to previous element
382*7e1e7763SThomas Graf  * @flags:	allocation flags (table shrinking)
383*7e1e7763SThomas Graf  *
384*7e1e7763SThomas Graf  * Identical to rhashtable_remove() but the caller is already aware of the
385*7e1e7763SThomas Graf  * element in front of the element to be deleted. This is particularly useful for
386*7e1e7763SThomas Graf  * deletion when combined with walking or lookup.
387*7e1e7763SThomas Graf  */
388*7e1e7763SThomas Graf void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
389*7e1e7763SThomas Graf 			     struct rhash_head **pprev, gfp_t flags)
390*7e1e7763SThomas Graf {
391*7e1e7763SThomas Graf 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
392*7e1e7763SThomas Graf 
393*7e1e7763SThomas Graf 	ASSERT_RHT_MUTEX(ht);
394*7e1e7763SThomas Graf 
395*7e1e7763SThomas Graf 	RCU_INIT_POINTER(*pprev, obj->next);
396*7e1e7763SThomas Graf 	ht->nelems--;
397*7e1e7763SThomas Graf 
398*7e1e7763SThomas Graf 	if (ht->p.shrink_decision &&
399*7e1e7763SThomas Graf 	    ht->p.shrink_decision(ht, tbl->size))
400*7e1e7763SThomas Graf 		rhashtable_shrink(ht, flags);
401*7e1e7763SThomas Graf }
402*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
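
/* Sketch of the "walk + delete" pattern mentioned above (illustrative;
 * my_ht, struct my_obj and 'value' are assumed names). Unlike
 * rhashtable_remove() below, which matches on the rhash_head pointer itself,
 * this walk matches on an object member. Runs under the writer mutex.
 *
 *	struct bucket_table *tbl = rht_dereference(my_ht.tbl, &my_ht);
 *	struct rhash_head __rcu **pprev;
 *	struct rhash_head *he;
 *	u32 h = rhashtable_hashfn(&my_ht, &value, sizeof(value));
 *
 *	pprev = &tbl->buckets[h];
 *	rht_for_each(he, tbl->buckets[h], &my_ht) {
 *		struct my_obj *obj = rht_obj(&my_ht, he);
 *
 *		if (obj->value != value) {
 *			pprev = &he->next;
 *			continue;
 *		}
 *		rhashtable_remove_pprev(&my_ht, he, pprev, GFP_KERNEL);
 *		break;
 *	}
 */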
403*7e1e7763SThomas Graf 
404*7e1e7763SThomas Graf /**
405*7e1e7763SThomas Graf  * rhashtable_remove - remove object from hash table
406*7e1e7763SThomas Graf  * @ht:		hash table
407*7e1e7763SThomas Graf  * @obj:	pointer to hash head inside object
408*7e1e7763SThomas Graf  * @flags:	allocation flags (table shrinking)
409*7e1e7763SThomas Graf  *
410*7e1e7763SThomas Graf  * Since the hash chain is singly linked, the removal operation needs to
411*7e1e7763SThomas Graf  * walk the bucket chain to find the entry. Removal is thus considerably
412*7e1e7763SThomas Graf  * slower if the hash table is not correctly sized.
413*7e1e7763SThomas Graf  *
414*7e1e7763SThomas Graf  * Will automatically shrink the table via rhashtable_shrink() if the
415*7e1e7763SThomas Graf  * shrink_decision function specified at rhashtable_init() returns true.
416*7e1e7763SThomas Graf  *
417*7e1e7763SThomas Graf  * The caller must ensure that no concurrent table mutations occur. It is
418*7e1e7763SThomas Graf  * however valid to have concurrent lookups if they are RCU protected.
419*7e1e7763SThomas Graf  */
420*7e1e7763SThomas Graf bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
421*7e1e7763SThomas Graf 		       gfp_t flags)
422*7e1e7763SThomas Graf {
423*7e1e7763SThomas Graf 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
424*7e1e7763SThomas Graf 	struct rhash_head __rcu **pprev;
425*7e1e7763SThomas Graf 	struct rhash_head *he;
426*7e1e7763SThomas Graf 	u32 h;
427*7e1e7763SThomas Graf 
428*7e1e7763SThomas Graf 	ASSERT_RHT_MUTEX(ht);
429*7e1e7763SThomas Graf 
430*7e1e7763SThomas Graf 	h = head_hashfn(ht, obj, tbl->size);
431*7e1e7763SThomas Graf 
432*7e1e7763SThomas Graf 	pprev = &tbl->buckets[h];
433*7e1e7763SThomas Graf 	rht_for_each(he, tbl->buckets[h], ht) {
434*7e1e7763SThomas Graf 		if (he != obj) {
435*7e1e7763SThomas Graf 			pprev = &he->next;
436*7e1e7763SThomas Graf 			continue;
437*7e1e7763SThomas Graf 		}
438*7e1e7763SThomas Graf 
439*7e1e7763SThomas Graf 		rhashtable_remove_pprev(ht, he, pprev, flags);
440*7e1e7763SThomas Graf 		return true;
441*7e1e7763SThomas Graf 	}
442*7e1e7763SThomas Graf 
443*7e1e7763SThomas Graf 	return false;
444*7e1e7763SThomas Graf }
445*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_remove);
446*7e1e7763SThomas Graf 
447*7e1e7763SThomas Graf /**
448*7e1e7763SThomas Graf  * rhashtable_lookup - lookup key in hash table
449*7e1e7763SThomas Graf  * @ht:		hash table
450*7e1e7763SThomas Graf  * @key:	pointer to key
451*7e1e7763SThomas Graf  *
452*7e1e7763SThomas Graf  * Computes the hash value for the key and traverses the bucket chain looking
453*7e1e7763SThomas Graf  * for an entry with an identical key. The first matching entry is returned.
454*7e1e7763SThomas Graf  *
455*7e1e7763SThomas Graf  * This lookup function may only be used for fixed length key hash tables
456*7e1e7763SThomas Graf  * (key_len parameter set). It will BUG() if used inappropriately.
457*7e1e7763SThomas Graf  *
458*7e1e7763SThomas Graf  * Lookups may occur in parallel with hash mutations as long as the lookup is
459*7e1e7763SThomas Graf  * guarded by rcu_read_lock(). The caller must take care of this.
460*7e1e7763SThomas Graf  */
461*7e1e7763SThomas Graf void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
462*7e1e7763SThomas Graf {
463*7e1e7763SThomas Graf 	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
464*7e1e7763SThomas Graf 	struct rhash_head *he;
465*7e1e7763SThomas Graf 	u32 h;
466*7e1e7763SThomas Graf 
467*7e1e7763SThomas Graf 	BUG_ON(!ht->p.key_len);
468*7e1e7763SThomas Graf 
469*7e1e7763SThomas Graf 	h = __hashfn(ht, key, ht->p.key_len, tbl->size);
470*7e1e7763SThomas Graf 	rht_for_each_rcu(he, tbl->buckets[h], ht) {
471*7e1e7763SThomas Graf 		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
472*7e1e7763SThomas Graf 			   ht->p.key_len))
473*7e1e7763SThomas Graf 			continue;
474*7e1e7763SThomas Graf 		return (void *) he - ht->p.head_offset;
475*7e1e7763SThomas Graf 	}
476*7e1e7763SThomas Graf 
477*7e1e7763SThomas Graf 	return NULL;
478*7e1e7763SThomas Graf }
479*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_lookup);
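
/* Read-side sketch (illustrative names my_ht, struct my_obj): a lookup only
 * needs an RCU read-side critical section, even while a writer inserts,
 * removes or resizes concurrently. The result must be used (or copied)
 * before rcu_read_unlock().
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		pr_info("found %d\n", obj->value);
 *	rcu_read_unlock();
 */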
480*7e1e7763SThomas Graf 
481*7e1e7763SThomas Graf /**
482*7e1e7763SThomas Graf  * rhashtable_lookup_compare - search hash table with compare function
483*7e1e7763SThomas Graf  * @ht:		hash table
484*7e1e7763SThomas Graf  * @hash:	hash value of desired entry
485*7e1e7763SThomas Graf  * @compare:	compare function, must return true on match
486*7e1e7763SThomas Graf  * @arg:	argument passed on to compare function
487*7e1e7763SThomas Graf  *
488*7e1e7763SThomas Graf  * Traverses the bucket chain behind the provided hash value and calls the
489*7e1e7763SThomas Graf  * specified compare function for each entry.
490*7e1e7763SThomas Graf  *
491*7e1e7763SThomas Graf  * Lookups may occur in parallel with hash mutations as long as the lookup is
492*7e1e7763SThomas Graf  * guarded by rcu_read_lock(). The caller must take care of this.
493*7e1e7763SThomas Graf  *
494*7e1e7763SThomas Graf  * Returns the first entry on which the compare function returned true.
495*7e1e7763SThomas Graf  */
496*7e1e7763SThomas Graf void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
497*7e1e7763SThomas Graf 				bool (*compare)(void *, void *), void *arg)
498*7e1e7763SThomas Graf {
499*7e1e7763SThomas Graf 	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
500*7e1e7763SThomas Graf 	struct rhash_head *he;
501*7e1e7763SThomas Graf 
502*7e1e7763SThomas Graf 	if (unlikely(hash >= tbl->size))
503*7e1e7763SThomas Graf 		return NULL;
504*7e1e7763SThomas Graf 
505*7e1e7763SThomas Graf 	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
506*7e1e7763SThomas Graf 		if (!compare(rht_obj(ht, he), arg))
507*7e1e7763SThomas Graf 			continue;
508*7e1e7763SThomas Graf 		return (void *) he - ht->p.head_offset;
509*7e1e7763SThomas Graf 	}
510*7e1e7763SThomas Graf 
511*7e1e7763SThomas Graf 	return NULL;
512*7e1e7763SThomas Graf }
513*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
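
/* Sketch of a compare-function lookup (illustrative names): here the table
 * is assumed to hash on obj->ifindex alone (key_len = sizeof(int)), while
 * the compare callback checks a compound key. The caller supplies the bucket
 * hash, computed the same way insertions hash their objects.
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		struct my_obj *obj = ptr;
 *		struct my_key *key = arg;
 *
 *		return obj->ifindex == key->ifindex && obj->id == key->id;
 *	}
 *
 *	rcu_read_lock();
 *	hash = rhashtable_hashfn(&my_ht, &k.ifindex, sizeof(k.ifindex));
 *	obj = rhashtable_lookup_compare(&my_ht, hash, my_cmp, &k);
 *	rcu_read_unlock();
 */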
514*7e1e7763SThomas Graf 
515*7e1e7763SThomas Graf static size_t rounded_hashtable_size(unsigned int nelem)
516*7e1e7763SThomas Graf {
517*7e1e7763SThomas Graf 	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
518*7e1e7763SThomas Graf }
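
/* Worked example: a nelem_hint of 48 gives 48 * 4 / 3 = 64, already a power
 * of two, so the initial table has 64 buckets; a hint of 100 gives 133,
 * rounded up to 256. Either way the hinted element count stays at or below
 * the 75% load threshold used by rht_grow_above_75().
 */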
519*7e1e7763SThomas Graf 
520*7e1e7763SThomas Graf /**
521*7e1e7763SThomas Graf  * rhashtable_init - initialize a new hash table
522*7e1e7763SThomas Graf  * @ht:		hash table to be initialized
523*7e1e7763SThomas Graf  * @params:	configuration parameters
524*7e1e7763SThomas Graf  *
525*7e1e7763SThomas Graf  * Initializes a new hash table based on the provided configuration
526*7e1e7763SThomas Graf  * parameters. A table can be configured either with a variable or
527*7e1e7763SThomas Graf  * fixed length key:
528*7e1e7763SThomas Graf  *
529*7e1e7763SThomas Graf  * Configuration Example 1: Fixed length keys
530*7e1e7763SThomas Graf  * struct test_obj {
531*7e1e7763SThomas Graf  *	int			key;
532*7e1e7763SThomas Graf  *	void *			my_member;
533*7e1e7763SThomas Graf  *	struct rhash_head	node;
534*7e1e7763SThomas Graf  * };
535*7e1e7763SThomas Graf  *
536*7e1e7763SThomas Graf  * struct rhashtable_params params = {
537*7e1e7763SThomas Graf  *	.head_offset = offsetof(struct test_obj, node),
538*7e1e7763SThomas Graf  *	.key_offset = offsetof(struct test_obj, key),
539*7e1e7763SThomas Graf  *	.key_len = sizeof(int),
540*7e1e7763SThomas Graf  *	.hashfn = arch_fast_hash,
541*7e1e7763SThomas Graf  *	.mutex_is_held = &my_mutex_is_held,
542*7e1e7763SThomas Graf  * };
543*7e1e7763SThomas Graf  *
544*7e1e7763SThomas Graf  * Configuration Example 2: Variable length keys
545*7e1e7763SThomas Graf  * struct test_obj {
546*7e1e7763SThomas Graf  *	[...]
547*7e1e7763SThomas Graf  *	struct rhash_head	node;
548*7e1e7763SThomas Graf  * };
549*7e1e7763SThomas Graf  *
550*7e1e7763SThomas Graf  * u32 my_hash_fn(const void *data, u32 seed)
551*7e1e7763SThomas Graf  * {
552*7e1e7763SThomas Graf  *	struct test_obj *obj = data;
553*7e1e7763SThomas Graf  *
554*7e1e7763SThomas Graf  *	return [... hash ...];
555*7e1e7763SThomas Graf  * }
556*7e1e7763SThomas Graf  *
557*7e1e7763SThomas Graf  * struct rhashtable_params params = {
558*7e1e7763SThomas Graf  *	.head_offset = offsetof(struct test_obj, node),
559*7e1e7763SThomas Graf  *	.hashfn = arch_fast_hash,
560*7e1e7763SThomas Graf  *	.obj_hashfn = my_hash_fn,
561*7e1e7763SThomas Graf  *	.mutex_is_held = &my_mutex_is_held,
562*7e1e7763SThomas Graf  * };
563*7e1e7763SThomas Graf  */
564*7e1e7763SThomas Graf int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
565*7e1e7763SThomas Graf {
566*7e1e7763SThomas Graf 	struct bucket_table *tbl;
567*7e1e7763SThomas Graf 	size_t size;
568*7e1e7763SThomas Graf 
569*7e1e7763SThomas Graf 	size = HASH_DEFAULT_SIZE;
570*7e1e7763SThomas Graf 
571*7e1e7763SThomas Graf 	if ((params->key_len && !params->hashfn) ||
572*7e1e7763SThomas Graf 	    (!params->key_len && !params->obj_hashfn))
573*7e1e7763SThomas Graf 		return -EINVAL;
574*7e1e7763SThomas Graf 
575*7e1e7763SThomas Graf 	if (params->nelem_hint)
576*7e1e7763SThomas Graf 		size = rounded_hashtable_size(params->nelem_hint);
577*7e1e7763SThomas Graf 
578*7e1e7763SThomas Graf 	tbl = bucket_table_alloc(size, GFP_KERNEL);
579*7e1e7763SThomas Graf 	if (tbl == NULL)
580*7e1e7763SThomas Graf 		return -ENOMEM;
581*7e1e7763SThomas Graf 
582*7e1e7763SThomas Graf 	memset(ht, 0, sizeof(*ht));
583*7e1e7763SThomas Graf 	ht->shift = ilog2(tbl->size);
584*7e1e7763SThomas Graf 	memcpy(&ht->p, params, sizeof(*params));
585*7e1e7763SThomas Graf 	RCU_INIT_POINTER(ht->tbl, tbl);
586*7e1e7763SThomas Graf 
587*7e1e7763SThomas Graf 	if (!ht->p.hash_rnd)
588*7e1e7763SThomas Graf 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
589*7e1e7763SThomas Graf 
590*7e1e7763SThomas Graf 	return 0;
591*7e1e7763SThomas Graf }
592*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_init);
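
/* Sketch of the pieces the configuration examples above leave out
 * (illustrative names my_ht, my_ht_mutex): the writer mutex and the
 * mutex_is_held callback consulted by lockdep_rht_mutex_is_held().
 * lockdep_is_held() assumes lockdep is enabled; the self-test below simply
 * returns 1 from its callback.
 *
 *	static DEFINE_MUTEX(my_ht_mutex);
 *	static struct rhashtable my_ht;
 *
 *	static int my_mutex_is_held(void)
 *	{
 *		return lockdep_is_held(&my_ht_mutex);
 *	}
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 */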
593*7e1e7763SThomas Graf 
594*7e1e7763SThomas Graf /**
595*7e1e7763SThomas Graf  * rhashtable_destroy - destroy hash table
596*7e1e7763SThomas Graf  * @ht:		the hash table to destroy
597*7e1e7763SThomas Graf  *
598*7e1e7763SThomas Graf  * Frees the bucket array.
599*7e1e7763SThomas Graf  */
600*7e1e7763SThomas Graf void rhashtable_destroy(const struct rhashtable *ht)
601*7e1e7763SThomas Graf {
602*7e1e7763SThomas Graf 	const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
603*7e1e7763SThomas Graf 
604*7e1e7763SThomas Graf 	bucket_table_free(tbl);
605*7e1e7763SThomas Graf }
606*7e1e7763SThomas Graf EXPORT_SYMBOL_GPL(rhashtable_destroy);
607*7e1e7763SThomas Graf 
608*7e1e7763SThomas Graf /**************************************************************************
609*7e1e7763SThomas Graf  * Self Test
610*7e1e7763SThomas Graf  **************************************************************************/
611*7e1e7763SThomas Graf 
612*7e1e7763SThomas Graf #ifdef CONFIG_TEST_RHASHTABLE
613*7e1e7763SThomas Graf 
614*7e1e7763SThomas Graf #define TEST_HT_SIZE	8
615*7e1e7763SThomas Graf #define TEST_ENTRIES	2048
616*7e1e7763SThomas Graf #define TEST_PTR	((void *) 0xdeadbeef)
617*7e1e7763SThomas Graf #define TEST_NEXPANDS	4
618*7e1e7763SThomas Graf 
619*7e1e7763SThomas Graf static int test_mutex_is_held(void)
620*7e1e7763SThomas Graf {
621*7e1e7763SThomas Graf 	return 1;
622*7e1e7763SThomas Graf }
623*7e1e7763SThomas Graf 
624*7e1e7763SThomas Graf struct test_obj {
625*7e1e7763SThomas Graf 	void			*ptr;
626*7e1e7763SThomas Graf 	int			value;
627*7e1e7763SThomas Graf 	struct rhash_head	node;
628*7e1e7763SThomas Graf };
629*7e1e7763SThomas Graf 
630*7e1e7763SThomas Graf static int __init test_rht_lookup(struct rhashtable *ht)
631*7e1e7763SThomas Graf {
632*7e1e7763SThomas Graf 	unsigned int i;
633*7e1e7763SThomas Graf 
634*7e1e7763SThomas Graf 	for (i = 0; i < TEST_ENTRIES * 2; i++) {
635*7e1e7763SThomas Graf 		struct test_obj *obj;
636*7e1e7763SThomas Graf 		bool expected = !(i % 2);
637*7e1e7763SThomas Graf 		u32 key = i;
638*7e1e7763SThomas Graf 
639*7e1e7763SThomas Graf 		obj = rhashtable_lookup(ht, &key);
640*7e1e7763SThomas Graf 
641*7e1e7763SThomas Graf 		if (expected && !obj) {
642*7e1e7763SThomas Graf 			pr_warn("Test failed: Could not find key %u\n", key);
643*7e1e7763SThomas Graf 			return -ENOENT;
644*7e1e7763SThomas Graf 		} else if (!expected && obj) {
645*7e1e7763SThomas Graf 			pr_warn("Test failed: Unexpected entry found for key %u\n",
646*7e1e7763SThomas Graf 				key);
647*7e1e7763SThomas Graf 			return -EEXIST;
648*7e1e7763SThomas Graf 		} else if (expected && obj) {
649*7e1e7763SThomas Graf 			if (obj->ptr != TEST_PTR || obj->value != i) {
650*7e1e7763SThomas Graf 				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
651*7e1e7763SThomas Graf 					obj->ptr, TEST_PTR, obj->value, i);
652*7e1e7763SThomas Graf 				return -EINVAL;
653*7e1e7763SThomas Graf 			}
654*7e1e7763SThomas Graf 		}
655*7e1e7763SThomas Graf 	}
656*7e1e7763SThomas Graf 
657*7e1e7763SThomas Graf 	return 0;
658*7e1e7763SThomas Graf }
659*7e1e7763SThomas Graf 
660*7e1e7763SThomas Graf static void test_bucket_stats(struct rhashtable *ht,
661*7e1e7763SThomas Graf 				     struct bucket_table *tbl,
662*7e1e7763SThomas Graf 				     bool quiet)
663*7e1e7763SThomas Graf {
664*7e1e7763SThomas Graf 	unsigned int cnt, i, total = 0;
665*7e1e7763SThomas Graf 	struct test_obj *obj;
666*7e1e7763SThomas Graf 
667*7e1e7763SThomas Graf 	for (i = 0; i < tbl->size; i++) {
668*7e1e7763SThomas Graf 		cnt = 0;
669*7e1e7763SThomas Graf 
670*7e1e7763SThomas Graf 		if (!quiet)
671*7e1e7763SThomas Graf 			pr_info(" [%#4x/%zu]", i, tbl->size);
672*7e1e7763SThomas Graf 
673*7e1e7763SThomas Graf 		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
674*7e1e7763SThomas Graf 			cnt++;
675*7e1e7763SThomas Graf 			total++;
676*7e1e7763SThomas Graf 			if (!quiet)
677*7e1e7763SThomas Graf 				pr_cont(" [%p],", obj);
678*7e1e7763SThomas Graf 		}
679*7e1e7763SThomas Graf 
680*7e1e7763SThomas Graf 		if (!quiet)
681*7e1e7763SThomas Graf 			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
682*7e1e7763SThomas Graf 				i, tbl->buckets[i], cnt);
683*7e1e7763SThomas Graf 	}
684*7e1e7763SThomas Graf 
685*7e1e7763SThomas Graf 	pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
686*7e1e7763SThomas Graf 		total, ht->nelems, TEST_ENTRIES);
687*7e1e7763SThomas Graf }
688*7e1e7763SThomas Graf 
689*7e1e7763SThomas Graf static int __init test_rhashtable(struct rhashtable *ht)
690*7e1e7763SThomas Graf {
691*7e1e7763SThomas Graf 	struct bucket_table *tbl;
692*7e1e7763SThomas Graf 	struct test_obj *obj, *next;
693*7e1e7763SThomas Graf 	int err;
694*7e1e7763SThomas Graf 	unsigned int i;
695*7e1e7763SThomas Graf 
696*7e1e7763SThomas Graf 	/*
697*7e1e7763SThomas Graf 	 * Insertion Test:
698*7e1e7763SThomas Graf 	 * Insert TEST_ENTRIES entries into the table; all keys are even numbers
699*7e1e7763SThomas Graf 	 */
700*7e1e7763SThomas Graf 	pr_info("  Adding %d keys\n", TEST_ENTRIES);
701*7e1e7763SThomas Graf 	for (i = 0; i < TEST_ENTRIES; i++) {
702*7e1e7763SThomas Graf 		struct test_obj *obj;
703*7e1e7763SThomas Graf 
704*7e1e7763SThomas Graf 		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
705*7e1e7763SThomas Graf 		if (!obj) {
706*7e1e7763SThomas Graf 			err = -ENOMEM;
707*7e1e7763SThomas Graf 			goto error;
708*7e1e7763SThomas Graf 		}
709*7e1e7763SThomas Graf 
710*7e1e7763SThomas Graf 		obj->ptr = TEST_PTR;
711*7e1e7763SThomas Graf 		obj->value = i * 2;
712*7e1e7763SThomas Graf 
713*7e1e7763SThomas Graf 		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
714*7e1e7763SThomas Graf 	}
715*7e1e7763SThomas Graf 
716*7e1e7763SThomas Graf 	rcu_read_lock();
717*7e1e7763SThomas Graf 	tbl = rht_dereference_rcu(ht->tbl, ht);
718*7e1e7763SThomas Graf 	test_bucket_stats(ht, tbl, true);
719*7e1e7763SThomas Graf 	test_rht_lookup(ht);
720*7e1e7763SThomas Graf 	rcu_read_unlock();
721*7e1e7763SThomas Graf 
722*7e1e7763SThomas Graf 	for (i = 0; i < TEST_NEXPANDS; i++) {
723*7e1e7763SThomas Graf 		pr_info("  Table expansion iteration %u...\n", i);
724*7e1e7763SThomas Graf 		rhashtable_expand(ht, GFP_KERNEL);
725*7e1e7763SThomas Graf 
726*7e1e7763SThomas Graf 		rcu_read_lock();
727*7e1e7763SThomas Graf 		pr_info("  Verifying lookups...\n");
728*7e1e7763SThomas Graf 		test_rht_lookup(ht);
729*7e1e7763SThomas Graf 		rcu_read_unlock();
730*7e1e7763SThomas Graf 	}
731*7e1e7763SThomas Graf 
732*7e1e7763SThomas Graf 	for (i = 0; i < TEST_NEXPANDS; i++) {
733*7e1e7763SThomas Graf 		pr_info("  Table shrinkage iteration %u...\n", i);
734*7e1e7763SThomas Graf 		rhashtable_shrink(ht, GFP_KERNEL);
735*7e1e7763SThomas Graf 
736*7e1e7763SThomas Graf 		rcu_read_lock();
737*7e1e7763SThomas Graf 		pr_info("  Verifying lookups...\n");
738*7e1e7763SThomas Graf 		test_rht_lookup(ht);
739*7e1e7763SThomas Graf 		rcu_read_unlock();
740*7e1e7763SThomas Graf 	}
741*7e1e7763SThomas Graf 
742*7e1e7763SThomas Graf 	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
743*7e1e7763SThomas Graf 	for (i = 0; i < TEST_ENTRIES; i++) {
744*7e1e7763SThomas Graf 		u32 key = i * 2;
745*7e1e7763SThomas Graf 
746*7e1e7763SThomas Graf 		obj = rhashtable_lookup(ht, &key);
747*7e1e7763SThomas Graf 		BUG_ON(!obj);
748*7e1e7763SThomas Graf 
749*7e1e7763SThomas Graf 		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
750*7e1e7763SThomas Graf 		kfree(obj);
751*7e1e7763SThomas Graf 	}
752*7e1e7763SThomas Graf 
753*7e1e7763SThomas Graf 	return 0;
754*7e1e7763SThomas Graf 
755*7e1e7763SThomas Graf error:
756*7e1e7763SThomas Graf 	tbl = rht_dereference_rcu(ht->tbl, ht);
757*7e1e7763SThomas Graf 	for (i = 0; i < tbl->size; i++)
758*7e1e7763SThomas Graf 		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
759*7e1e7763SThomas Graf 			kfree(obj);
760*7e1e7763SThomas Graf 
761*7e1e7763SThomas Graf 	return err;
762*7e1e7763SThomas Graf }
763*7e1e7763SThomas Graf 
764*7e1e7763SThomas Graf static int __init test_rht_init(void)
765*7e1e7763SThomas Graf {
766*7e1e7763SThomas Graf 	struct rhashtable ht;
767*7e1e7763SThomas Graf 	struct rhashtable_params params = {
768*7e1e7763SThomas Graf 		.nelem_hint = TEST_HT_SIZE,
769*7e1e7763SThomas Graf 		.head_offset = offsetof(struct test_obj, node),
770*7e1e7763SThomas Graf 		.key_offset = offsetof(struct test_obj, value),
771*7e1e7763SThomas Graf 		.key_len = sizeof(int),
772*7e1e7763SThomas Graf 		.hashfn = arch_fast_hash,
773*7e1e7763SThomas Graf 		.mutex_is_held = &test_mutex_is_held,
774*7e1e7763SThomas Graf 		.grow_decision = rht_grow_above_75,
775*7e1e7763SThomas Graf 		.shrink_decision = rht_shrink_below_30,
776*7e1e7763SThomas Graf 	};
777*7e1e7763SThomas Graf 	int err;
778*7e1e7763SThomas Graf 
779*7e1e7763SThomas Graf 	pr_info("Running resizable hashtable tests...\n");
780*7e1e7763SThomas Graf 
781*7e1e7763SThomas Graf 	err = rhashtable_init(&ht, &params);
782*7e1e7763SThomas Graf 	if (err < 0) {
783*7e1e7763SThomas Graf 		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
784*7e1e7763SThomas Graf 			err);
785*7e1e7763SThomas Graf 		return err;
786*7e1e7763SThomas Graf 	}
787*7e1e7763SThomas Graf 
788*7e1e7763SThomas Graf 	err = test_rhashtable(&ht);
789*7e1e7763SThomas Graf 
790*7e1e7763SThomas Graf 	rhashtable_destroy(&ht);
791*7e1e7763SThomas Graf 
792*7e1e7763SThomas Graf 	return err;
793*7e1e7763SThomas Graf }
794*7e1e7763SThomas Graf 
795*7e1e7763SThomas Graf subsys_initcall(test_rht_init);
796*7e1e7763SThomas Graf 
797*7e1e7763SThomas Graf #endif /* CONFIG_TEST_RHASHTABLE */