/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

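/* Illustrative sizing only (not taken from this file): with 8 possible
 * CPUs and the default locks_mul of 32, size = roundup_pow_of_two(8 * 32)
 * = 256 bucket locks, which is then capped at half the number of buckets
 * so that small tables do not carry an oversized lock array.
 */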
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		/* Fall back to kmalloc only if vmalloc was not used. */
		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

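/* Walk the chain of pending future tables and return the newest one,
 * i.e. the table that new entries should go into while a rehash is in
 * flight.
 */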
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

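/* Move one entry, the current tail of the given old bucket chain, into
 * its bucket in the newest table.  Returns -ENOENT once the old chain
 * is empty, which terminates the loop in rhashtable_rehash_chain().
 */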
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

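/* Complete a previously attached rehash: move every bucket chain into
 * the future table, publish it as ht->tbl, invalidate walkers that are
 * still attached to the old table and free the old table after an RCU
 * grace period.  Returns -EAGAIN if yet another future table is already
 * queued, so that the deferred worker runs again.
 */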
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

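/* Slow path called from the inline insert fast path when a bucket chain
 * has grown past the elasticity limit or the table has crossed 100%
 * utilisation: allocate a new table with GFP_ATOMIC (doubled in size if
 * the table is more than 75% full, otherwise the same size so that a
 * fresh hash seed breaks up long chains), attach it as future_tbl and
 * let the deferred worker do the actual rehash.
 */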
int rhashtable_insert_rehash(struct rhashtable *ht,
			     struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	if (err == 0)
		return NULL;
	else if (err == -EAGAIN)
		return tbl;
	else
		return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
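/* Illustrative sketch (not part of this file) of how the inline fast
 * paths in <linux/rhashtable.h> are normally used; the slow paths above
 * are only reached when a chain grows too long or the table must be
 * resized.  The object and params below are hypothetical.
 *
 *	struct my_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params my_params = {
 *		.head_offset	= offsetof(struct my_obj, node),
 *		.key_offset	= offsetof(struct my_obj, key),
 *		.key_len	= sizeof(u32),
 *	};
 *
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
 *	obj = rhashtable_lookup_fast(&my_ht, &key, my_params);
 *	err = rhashtable_remove_fast(&my_ht, &obj->node, my_params);
 */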

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 * @gfp:	GFP flags for allocations
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
			 gfp_t gfp)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), gfp);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
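/* Illustrative walker usage (a sketch, not kernel-doc; my_ht and my_obj
 * are hypothetical).  rhashtable_walk_start() may return -EAGAIN, which
 * only means the iterator was rewound, and rhashtable_walk_next() may
 * return ERR_PTR(-EAGAIN) after a resize, in which case the walk simply
 * continues from the beginning:
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		... use obj ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */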

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
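/* Illustrative arithmetic for the helper above: a nelem_hint of 100
 * with the default min_size gives roundup_pow_of_two(100 * 4 / 3) =
 * roundup_pow_of_two(133) = 256 buckets, so the hinted number of
 * elements starts out well below the 75% growth threshold.
 */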

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
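/* A minimal init/teardown sketch using Configuration Example 1 above
 * (my_ht and err are hypothetical locals):
 *
 *	struct rhashtable my_ht;
 *	int err;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_destroy(&my_ht);
 */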

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements, so resources must be
 * released in a manner compatible with such concurrent access. The
 * bucket array is then freed.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
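/* Illustrative teardown with a free callback (a sketch; my_obj and
 * my_ht are hypothetical):
 *
 *	static void my_free(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, my_free, NULL);
 *
 * When the elements are owned and freed elsewhere, rhashtable_destroy()
 * below is sufficient.
 */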

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
861