xref: /linux/lib/rhashtable.c (revision ebf68996de0ab250c5d520eb2291ab65643e9a1e)
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
5  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
6  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
7  *
8  * Code partially derived from nft_hash
9  * Rewritten with rehash code from br_multicast plus single list
10  * pointer as suggested by Josh Triplett
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/log2.h>
21 #include <linux/sched.h>
22 #include <linux/rculist.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/mm.h>
26 #include <linux/jhash.h>
27 #include <linux/random.h>
28 #include <linux/rhashtable.h>
29 #include <linux/err.h>
30 #include <linux/export.h>
31 
32 #define HASH_DEFAULT_SIZE	64UL
33 #define HASH_MIN_SIZE		4U
34 
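/* Bucket arrays that cannot be allocated in one piece (e.g. under
 * GFP_ATOMIC) fall back to a "nested" layout: tbl->buckets[0] then points
 * at a page-sized top-level table whose entries are either further
 * page-sized tables or, at the leaf level, the bucket heads themselves.
 * tbl->nest holds the number of hash bits consumed by the top level and
 * is zero for ordinary flat tables.
 */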
35 union nested_table {
36 	union nested_table __rcu *table;
37 	struct rhash_lock_head *bucket;
38 };
39 
40 static u32 head_hashfn(struct rhashtable *ht,
41 		       const struct bucket_table *tbl,
42 		       const struct rhash_head *he)
43 {
44 	return rht_head_hashfn(ht, tbl, he, ht->p);
45 }
46 
47 #ifdef CONFIG_PROVE_LOCKING
48 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
49 
50 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
51 {
52 	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
53 }
54 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
55 
56 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
57 {
58 	if (!debug_locks)
59 		return 1;
60 	if (unlikely(tbl->nest))
61 		return 1;
62 	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
63 }
64 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
65 #else
66 #define ASSERT_RHT_MUTEX(HT)
67 #endif
68 
69 static void nested_table_free(union nested_table *ntbl, unsigned int size)
70 {
71 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
72 	const unsigned int len = 1 << shift;
73 	unsigned int i;
74 
75 	ntbl = rcu_dereference_raw(ntbl->table);
76 	if (!ntbl)
77 		return;
78 
79 	if (size > len) {
80 		size >>= shift;
81 		for (i = 0; i < len; i++)
82 			nested_table_free(ntbl + i, size);
83 	}
84 
85 	kfree(ntbl);
86 }
87 
88 static void nested_bucket_table_free(const struct bucket_table *tbl)
89 {
90 	unsigned int size = tbl->size >> tbl->nest;
91 	unsigned int len = 1 << tbl->nest;
92 	union nested_table *ntbl;
93 	unsigned int i;
94 
95 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
96 
97 	for (i = 0; i < len; i++)
98 		nested_table_free(ntbl + i, size);
99 
100 	kfree(ntbl);
101 }
102 
103 static void bucket_table_free(const struct bucket_table *tbl)
104 {
105 	if (tbl->nest)
106 		nested_bucket_table_free(tbl);
107 
108 	kvfree(tbl);
109 }
110 
111 static void bucket_table_free_rcu(struct rcu_head *head)
112 {
113 	bucket_table_free(container_of(head, struct bucket_table, rcu));
114 }
115 
116 static union nested_table *nested_table_alloc(struct rhashtable *ht,
117 					      union nested_table __rcu **prev,
118 					      bool leaf)
119 {
120 	union nested_table *ntbl;
121 	int i;
122 
123 	ntbl = rcu_dereference(*prev);
124 	if (ntbl)
125 		return ntbl;
126 
127 	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
128 
129 	if (ntbl && leaf) {
130 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
131 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
132 	}
133 
134 	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
135 		return ntbl;
136 	/* Raced with another thread. */
137 	kfree(ntbl);
138 	return rcu_dereference(*prev);
139 }
140 
141 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
142 						      size_t nbuckets,
143 						      gfp_t gfp)
144 {
145 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
146 	struct bucket_table *tbl;
147 	size_t size;
148 
149 	if (nbuckets < (1 << (shift + 1)))
150 		return NULL;
151 
152 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
153 
154 	tbl = kzalloc(size, gfp);
155 	if (!tbl)
156 		return NULL;
157 
158 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
159 				false)) {
160 		kfree(tbl);
161 		return NULL;
162 	}
163 
164 	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
165 
166 	return tbl;
167 }
168 
169 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
170 					       size_t nbuckets,
171 					       gfp_t gfp)
172 {
173 	struct bucket_table *tbl = NULL;
174 	size_t size;
175 	int i;
176 	static struct lock_class_key __key;
177 
178 	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
179 
180 	size = nbuckets;
181 
182 	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
183 		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
184 		nbuckets = 0;
185 	}
186 
187 	if (tbl == NULL)
188 		return NULL;
189 
190 	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
191 
192 	tbl->size = size;
193 
194 	rcu_head_init(&tbl->rcu);
195 	INIT_LIST_HEAD(&tbl->walkers);
196 
197 	tbl->hash_rnd = get_random_u32();
198 
199 	for (i = 0; i < nbuckets; i++)
200 		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
201 
202 	return tbl;
203 }
204 
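/* Return the newest table in the future_tbl chain, i.e. the table that
 * insertions must go into while a rehash is in progress.
 */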
205 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
206 						  struct bucket_table *tbl)
207 {
208 	struct bucket_table *new_tbl;
209 
210 	do {
211 		new_tbl = tbl;
212 		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
213 	} while (tbl);
214 
215 	return new_tbl;
216 }
217 
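/* Move a single entry from bucket old_hash of the old table into its new
 * bucket in the newest table, taking the entry at the tail of the old
 * chain first. Returns -ENOENT once the old chain is empty and -EAGAIN
 * if the newest table is nested, in which case the rehash cannot be
 * completed here.
 */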
218 static int rhashtable_rehash_one(struct rhashtable *ht,
219 				 struct rhash_lock_head **bkt,
220 				 unsigned int old_hash)
221 {
222 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
223 	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
224 	int err = -EAGAIN;
225 	struct rhash_head *head, *next, *entry;
226 	struct rhash_head __rcu **pprev = NULL;
227 	unsigned int new_hash;
228 
229 	if (new_tbl->nest)
230 		goto out;
231 
232 	err = -ENOENT;
233 
234 	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
235 			  old_tbl, old_hash) {
236 		err = 0;
237 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
238 
239 		if (rht_is_a_nulls(next))
240 			break;
241 
242 		pprev = &entry->next;
243 	}
244 
245 	if (err)
246 		goto out;
247 
248 	new_hash = head_hashfn(ht, new_tbl, entry);
249 
250 	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
251 
252 	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
253 
254 	RCU_INIT_POINTER(entry->next, head);
255 
256 	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
257 
258 	if (pprev)
259 		rcu_assign_pointer(*pprev, next);
260 	else
261 		/* Need to preserve the bit lock. */
262 		rht_assign_locked(bkt, next);
263 
264 out:
265 	return err;
266 }
267 
268 static int rhashtable_rehash_chain(struct rhashtable *ht,
269 				    unsigned int old_hash)
270 {
271 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
272 	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
273 	int err;
274 
275 	if (!bkt)
276 		return 0;
277 	rht_lock(old_tbl, bkt);
278 
279 	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
280 		;
281 
282 	if (err == -ENOENT)
283 		err = 0;
284 	rht_unlock(old_tbl, bkt);
285 
286 	return err;
287 }
288 
289 static int rhashtable_rehash_attach(struct rhashtable *ht,
290 				    struct bucket_table *old_tbl,
291 				    struct bucket_table *new_tbl)
292 {
293 	/* Make insertions go into the new, empty table right away. Deletions
294 	 * and lookups will be attempted in both tables until we synchronize.
295 	 * As cmpxchg() provides strong barriers, we do not need
296 	 * rcu_assign_pointer().
297 	 */
298 
299 	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
300 		    new_tbl) != NULL)
301 		return -EEXIST;
302 
303 	return 0;
304 }
305 
306 static int rhashtable_rehash_table(struct rhashtable *ht)
307 {
308 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
309 	struct bucket_table *new_tbl;
310 	struct rhashtable_walker *walker;
311 	unsigned int old_hash;
312 	int err;
313 
314 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
315 	if (!new_tbl)
316 		return 0;
317 
318 	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
319 		err = rhashtable_rehash_chain(ht, old_hash);
320 		if (err)
321 			return err;
322 		cond_resched();
323 	}
324 
325 	/* Publish the new table pointer. */
326 	rcu_assign_pointer(ht->tbl, new_tbl);
327 
328 	spin_lock(&ht->lock);
329 	list_for_each_entry(walker, &old_tbl->walkers, list)
330 		walker->tbl = NULL;
331 
332 	/* Wait for readers. All new readers will see the new
333 	 * table, and thus no references to the old table will
334 	 * remain.
335 	 * We do this inside the locked region so that
336 	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
337 	 * to detect that the table is being freed and skip re-linking it.
338 	 */
339 	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
340 	spin_unlock(&ht->lock);
341 
342 	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
343 }
344 
345 static int rhashtable_rehash_alloc(struct rhashtable *ht,
346 				   struct bucket_table *old_tbl,
347 				   unsigned int size)
348 {
349 	struct bucket_table *new_tbl;
350 	int err;
351 
352 	ASSERT_RHT_MUTEX(ht);
353 
354 	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
355 	if (new_tbl == NULL)
356 		return -ENOMEM;
357 
358 	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
359 	if (err)
360 		bucket_table_free(new_tbl);
361 
362 	return err;
363 }
364 
365 /**
366  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
367  * @ht:		the hash table to shrink
368  *
369  * This function shrinks the hash table to fit, i.e., to the smallest
370  * size that would not cause it to expand again right away.
371  *
372  * The caller must ensure that no concurrent resizing occurs by holding
373  * ht->mutex.
374  *
375  * The caller must ensure that no concurrent table mutations take place.
376  * It is however valid to have concurrent lookups if they are RCU protected.
377  *
378  * It is valid to have concurrent insertions and deletions protected by per
379  * bucket locks or concurrent RCU protected lookups and traversals.
380  */
381 static int rhashtable_shrink(struct rhashtable *ht)
382 {
383 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
384 	unsigned int nelems = atomic_read(&ht->nelems);
385 	unsigned int size = 0;
386 
387 	if (nelems)
388 		size = roundup_pow_of_two(nelems * 3 / 2);
389 	if (size < ht->p.min_size)
390 		size = ht->p.min_size;
391 
392 	if (old_tbl->size <= size)
393 		return 0;
394 
395 	if (rht_dereference(old_tbl->future_tbl, ht))
396 		return -EEXIST;
397 
398 	return rhashtable_rehash_alloc(ht, old_tbl, size);
399 }
400 
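/* Deferred resize work: grow the newest table once it is more than 75%
 * full, shrink it when automatic shrinking is enabled and it falls below
 * 30%, and replace a nested table with a flat one of the same size now
 * that GFP_KERNEL allocations can be used. The work re-queues itself if
 * the rehash could not be completed.
 */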
401 static void rht_deferred_worker(struct work_struct *work)
402 {
403 	struct rhashtable *ht;
404 	struct bucket_table *tbl;
405 	int err = 0;
406 
407 	ht = container_of(work, struct rhashtable, run_work);
408 	mutex_lock(&ht->mutex);
409 
410 	tbl = rht_dereference(ht->tbl, ht);
411 	tbl = rhashtable_last_table(ht, tbl);
412 
413 	if (rht_grow_above_75(ht, tbl))
414 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
415 	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
416 		err = rhashtable_shrink(ht);
417 	else if (tbl->nest)
418 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
419 
420 	if (!err || err == -EEXIST) {
421 		int nerr;
422 
423 		nerr = rhashtable_rehash_table(ht);
424 		err = err ?: nerr;
425 	}
426 
427 	mutex_unlock(&ht->mutex);
428 
429 	if (err)
430 		schedule_work(&ht->run_work);
431 }
432 
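/* Allocate and attach a new table from the insert slow path after a
 * bucket chain grew too long or the table exceeded 100% load. Only a
 * GFP_ATOMIC allocation is attempted here; the rehash itself, and any
 * retry of a failed allocation, is left to the deferred worker.
 */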
433 static int rhashtable_insert_rehash(struct rhashtable *ht,
434 				    struct bucket_table *tbl)
435 {
436 	struct bucket_table *old_tbl;
437 	struct bucket_table *new_tbl;
438 	unsigned int size;
439 	int err;
440 
441 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
442 
443 	size = tbl->size;
444 
445 	err = -EBUSY;
446 
447 	if (rht_grow_above_75(ht, tbl))
448 		size *= 2;
449 	/* Do not schedule more than one rehash */
450 	else if (old_tbl != tbl)
451 		goto fail;
452 
453 	err = -ENOMEM;
454 
455 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
456 	if (new_tbl == NULL)
457 		goto fail;
458 
459 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
460 	if (err) {
461 		bucket_table_free(new_tbl);
462 		if (err == -EEXIST)
463 			err = 0;
464 	} else
465 		schedule_work(&ht->run_work);
466 
467 	return err;
468 
469 fail:
470 	/* Do not fail the insert if someone else did a rehash. */
471 	if (likely(rcu_access_pointer(tbl->future_tbl)))
472 		return 0;
473 
474 	/* Schedule async rehash to retry allocation in process context. */
475 	if (err == -ENOMEM)
476 		schedule_work(&ht->run_work);
477 
478 	return err;
479 }
480 
481 static void *rhashtable_lookup_one(struct rhashtable *ht,
482 				   struct rhash_lock_head **bkt,
483 				   struct bucket_table *tbl, unsigned int hash,
484 				   const void *key, struct rhash_head *obj)
485 {
486 	struct rhashtable_compare_arg arg = {
487 		.ht = ht,
488 		.key = key,
489 	};
490 	struct rhash_head __rcu **pprev = NULL;
491 	struct rhash_head *head;
492 	int elasticity;
493 
494 	elasticity = RHT_ELASTICITY;
495 	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
496 		struct rhlist_head *list;
497 		struct rhlist_head *plist;
498 
499 		elasticity--;
500 		if (!key ||
501 		    (ht->p.obj_cmpfn ?
502 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
503 		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
504 			pprev = &head->next;
505 			continue;
506 		}
507 
508 		if (!ht->rhlist)
509 			return rht_obj(ht, head);
510 
511 		list = container_of(obj, struct rhlist_head, rhead);
512 		plist = container_of(head, struct rhlist_head, rhead);
513 
514 		RCU_INIT_POINTER(list->next, plist);
515 		head = rht_dereference_bucket(head->next, tbl, hash);
516 		RCU_INIT_POINTER(list->rhead.next, head);
517 		if (pprev)
518 			rcu_assign_pointer(*pprev, obj);
519 		else
520 			/* Need to preserve the bit lock */
521 			rht_assign_locked(bkt, obj);
522 
523 		return NULL;
524 	}
525 
526 	if (elasticity <= 0)
527 		return ERR_PTR(-EAGAIN);
528 
529 	return ERR_PTR(-ENOENT);
530 }
531 
532 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
533 						  struct rhash_lock_head **bkt,
534 						  struct bucket_table *tbl,
535 						  unsigned int hash,
536 						  struct rhash_head *obj,
537 						  void *data)
538 {
539 	struct bucket_table *new_tbl;
540 	struct rhash_head *head;
541 
542 	if (!IS_ERR_OR_NULL(data))
543 		return ERR_PTR(-EEXIST);
544 
545 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
546 		return ERR_CAST(data);
547 
548 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
549 	if (new_tbl)
550 		return new_tbl;
551 
552 	if (PTR_ERR(data) != -ENOENT)
553 		return ERR_CAST(data);
554 
555 	if (unlikely(rht_grow_above_max(ht, tbl)))
556 		return ERR_PTR(-E2BIG);
557 
558 	if (unlikely(rht_grow_above_100(ht, tbl)))
559 		return ERR_PTR(-EAGAIN);
560 
561 	head = rht_ptr(bkt, tbl, hash);
562 
563 	RCU_INIT_POINTER(obj->next, head);
564 	if (ht->rhlist) {
565 		struct rhlist_head *list;
566 
567 		list = container_of(obj, struct rhlist_head, rhead);
568 		RCU_INIT_POINTER(list->next, NULL);
569 	}
570 
571 	/* bkt is always the head of the list, so it holds
572 	 * the lock, which we need to preserve
573 	 */
574 	rht_assign_locked(bkt, obj);
575 
576 	atomic_inc(&ht->nelems);
577 	if (rht_grow_above_75(ht, tbl))
578 		schedule_work(&ht->run_work);
579 
580 	return NULL;
581 }
582 
583 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
584 				   struct rhash_head *obj)
585 {
586 	struct bucket_table *new_tbl;
587 	struct bucket_table *tbl;
588 	struct rhash_lock_head **bkt;
589 	unsigned int hash;
590 	void *data;
591 
592 	new_tbl = rcu_dereference(ht->tbl);
593 
594 	do {
595 		tbl = new_tbl;
596 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
597 		if (rcu_access_pointer(tbl->future_tbl))
598 			/* Failure is OK */
599 			bkt = rht_bucket_var(tbl, hash);
600 		else
601 			bkt = rht_bucket_insert(ht, tbl, hash);
602 		if (bkt == NULL) {
603 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
604 			data = ERR_PTR(-EAGAIN);
605 		} else {
606 			rht_lock(tbl, bkt);
607 			data = rhashtable_lookup_one(ht, bkt, tbl,
608 						     hash, key, obj);
609 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
610 							hash, obj, data);
611 			if (PTR_ERR(new_tbl) != -EEXIST)
612 				data = ERR_CAST(new_tbl);
613 
614 			rht_unlock(tbl, bkt);
615 		}
616 	} while (!IS_ERR_OR_NULL(new_tbl));
617 
618 	if (PTR_ERR(data) == -EAGAIN)
619 		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
620 			       -EAGAIN);
621 
622 	return data;
623 }
624 
625 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
626 			     struct rhash_head *obj)
627 {
628 	void *data;
629 
630 	do {
631 		rcu_read_lock();
632 		data = rhashtable_try_insert(ht, key, obj);
633 		rcu_read_unlock();
634 	} while (PTR_ERR(data) == -EAGAIN);
635 
636 	return data;
637 }
638 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
639 
640 /**
641  * rhashtable_walk_enter - Initialise an iterator
642  * @ht:		Table to walk over
643  * @iter:	Hash table iterator
644  *
645  * This function prepares a hash table walk.
646  *
647  * Note that if you restart a walk after rhashtable_walk_stop you
648  * may see the same object twice.  Also, you may miss objects if
649  * there are removals in between rhashtable_walk_stop and the next
650  * call to rhashtable_walk_start.
651  *
652  * For a completely stable walk you should construct your own data
653  * structure outside the hash table.
654  *
655  * This function may be called from any process context, including
656  * non-preemptable context, but cannot be called from softirq or
657  * hardirq context.
658  *
659  * You must call rhashtable_walk_exit after this function returns.
660  */
661 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
662 {
663 	iter->ht = ht;
664 	iter->p = NULL;
665 	iter->slot = 0;
666 	iter->skip = 0;
667 	iter->end_of_table = 0;
668 
669 	spin_lock(&ht->lock);
670 	iter->walker.tbl =
671 		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
672 	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
673 	spin_unlock(&ht->lock);
674 }
675 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
676 
677 /**
678  * rhashtable_walk_exit - Free an iterator
679  * @iter:	Hash table iterator
680  *
681  * This function frees resources allocated by rhashtable_walk_enter.
682  */
683 void rhashtable_walk_exit(struct rhashtable_iter *iter)
684 {
685 	spin_lock(&iter->ht->lock);
686 	if (iter->walker.tbl)
687 		list_del(&iter->walker.list);
688 	spin_unlock(&iter->ht->lock);
689 }
690 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
691 
692 /**
693  * rhashtable_walk_start_check - Start a hash table walk
694  * @iter:	Hash table iterator
695  *
696  * Start a hash table walk at the current iterator position.  Note that we take
697  * the RCU lock in all cases including when we return an error.  So you must
698  * always call rhashtable_walk_stop to clean up.
699  *
700  * Returns zero if successful.
701  *
702  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
703  * will rewind back to the beginning and you may use it immediately
704  * by calling rhashtable_walk_next.
705  *
706  * rhashtable_walk_start is defined as an inline variant that returns
707  * void. This is preferred in cases where the caller would ignore
708  * resize events and always continue.
709  */
710 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
711 	__acquires(RCU)
712 {
713 	struct rhashtable *ht = iter->ht;
714 	bool rhlist = ht->rhlist;
715 
716 	rcu_read_lock();
717 
718 	spin_lock(&ht->lock);
719 	if (iter->walker.tbl)
720 		list_del(&iter->walker.list);
721 	spin_unlock(&ht->lock);
722 
723 	if (iter->end_of_table)
724 		return 0;
725 	if (!iter->walker.tbl) {
726 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
727 		iter->slot = 0;
728 		iter->skip = 0;
729 		return -EAGAIN;
730 	}
731 
732 	if (iter->p && !rhlist) {
733 		/*
734 		 * We need to validate that 'p' is still in the table, and
735 		 * if so, update 'skip'
736 		 */
737 		struct rhash_head *p;
738 		int skip = 0;
739 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
740 			skip++;
741 			if (p == iter->p) {
742 				iter->skip = skip;
743 				goto found;
744 			}
745 		}
746 		iter->p = NULL;
747 	} else if (iter->p && rhlist) {
748 		/* Need to validate that 'list' is still in the table, and
749 		 * if so, update 'skip' and 'p'.
750 		 */
751 		struct rhash_head *p;
752 		struct rhlist_head *list;
753 		int skip = 0;
754 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
755 			for (list = container_of(p, struct rhlist_head, rhead);
756 			     list;
757 			     list = rcu_dereference(list->next)) {
758 				skip++;
759 				if (list == iter->list) {
760 					iter->p = p;
761 					iter->skip = skip;
762 					goto found;
763 				}
764 			}
765 		}
766 		iter->p = NULL;
767 	}
768 found:
769 	return 0;
770 }
771 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
772 
773 /**
774  * __rhashtable_walk_find_next - Find the next element in a table (or the first
775  * one in case of a new walk).
776  *
777  * @iter:	Hash table iterator
778  *
779  * Returns the found object or NULL when the end of the table is reached.
780  *
781  * Returns -EAGAIN if a resize event occurred.
782  */
783 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
784 {
785 	struct bucket_table *tbl = iter->walker.tbl;
786 	struct rhlist_head *list = iter->list;
787 	struct rhashtable *ht = iter->ht;
788 	struct rhash_head *p = iter->p;
789 	bool rhlist = ht->rhlist;
790 
791 	if (!tbl)
792 		return NULL;
793 
794 	for (; iter->slot < tbl->size; iter->slot++) {
795 		int skip = iter->skip;
796 
797 		rht_for_each_rcu(p, tbl, iter->slot) {
798 			if (rhlist) {
799 				list = container_of(p, struct rhlist_head,
800 						    rhead);
801 				do {
802 					if (!skip)
803 						goto next;
804 					skip--;
805 					list = rcu_dereference(list->next);
806 				} while (list);
807 
808 				continue;
809 			}
810 			if (!skip)
811 				break;
812 			skip--;
813 		}
814 
815 next:
816 		if (!rht_is_a_nulls(p)) {
817 			iter->skip++;
818 			iter->p = p;
819 			iter->list = list;
820 			return rht_obj(ht, rhlist ? &list->rhead : p);
821 		}
822 
823 		iter->skip = 0;
824 	}
825 
826 	iter->p = NULL;
827 
828 	/* Ensure we see any new tables. */
829 	smp_rmb();
830 
831 	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
832 	if (iter->walker.tbl) {
833 		iter->slot = 0;
834 		iter->skip = 0;
835 		return ERR_PTR(-EAGAIN);
836 	} else {
837 		iter->end_of_table = true;
838 	}
839 
840 	return NULL;
841 }
842 
843 /**
844  * rhashtable_walk_next - Return the next object and advance the iterator
845  * @iter:	Hash table iterator
846  *
847  * Note that you must call rhashtable_walk_stop when you are finished
848  * with the walk.
849  *
850  * Returns the next object or NULL when the end of the table is reached.
851  *
852  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
853  * will rewind back to the beginning and you may continue to use it.
854  */
855 void *rhashtable_walk_next(struct rhashtable_iter *iter)
856 {
857 	struct rhlist_head *list = iter->list;
858 	struct rhashtable *ht = iter->ht;
859 	struct rhash_head *p = iter->p;
860 	bool rhlist = ht->rhlist;
861 
862 	if (p) {
863 		if (!rhlist || !(list = rcu_dereference(list->next))) {
864 			p = rcu_dereference(p->next);
865 			list = container_of(p, struct rhlist_head, rhead);
866 		}
867 		if (!rht_is_a_nulls(p)) {
868 			iter->skip++;
869 			iter->p = p;
870 			iter->list = list;
871 			return rht_obj(ht, rhlist ? &list->rhead : p);
872 		}
873 
874 		/* At the end of this slot, switch to next one and then find
875 		 * next entry from that point.
876 		 */
877 		iter->skip = 0;
878 		iter->slot++;
879 	}
880 
881 	return __rhashtable_walk_find_next(iter);
882 }
883 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
884 
885 /**
886  * rhashtable_walk_peek - Return the next object but don't advance the iterator
887  * @iter:	Hash table iterator
888  *
889  * Returns the next object or NULL when the end of the table is reached.
890  *
891  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
892  * will rewind back to the beginning and you may continue to use it.
893  */
894 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
895 {
896 	struct rhlist_head *list = iter->list;
897 	struct rhashtable *ht = iter->ht;
898 	struct rhash_head *p = iter->p;
899 
900 	if (p)
901 		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
902 
903 	/* No object found in current iter, find next one in the table. */
904 
905 	if (iter->skip) {
906 		/* A nonzero skip value points to the next entry in the table
907 		 * beyond the last one that was found. Decrement skip so
908 		 * we find the current value. __rhashtable_walk_find_next
909 		 * will restore the original value of skip assuming that
910 		 * the table hasn't changed.
911 		 */
912 		iter->skip--;
913 	}
914 
915 	return __rhashtable_walk_find_next(iter);
916 }
917 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
918 
919 /**
920  * rhashtable_walk_stop - Finish a hash table walk
921  * @iter:	Hash table iterator
922  *
923  * Finish a hash table walk.  Does not reset the iterator to the start of the
924  * hash table.
925  */
926 void rhashtable_walk_stop(struct rhashtable_iter *iter)
927 	__releases(RCU)
928 {
929 	struct rhashtable *ht;
930 	struct bucket_table *tbl = iter->walker.tbl;
931 
932 	if (!tbl)
933 		goto out;
934 
935 	ht = iter->ht;
936 
937 	spin_lock(&ht->lock);
938 	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
939 		/* This bucket table is being freed, don't re-link it. */
940 		iter->walker.tbl = NULL;
941 	else
942 		list_add(&iter->walker.list, &tbl->walkers);
943 	spin_unlock(&ht->lock);
944 
945 out:
946 	rcu_read_unlock();
947 }
948 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
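
/* A minimal walker sketch (illustrative only; "my_ht" is a made-up table
 * and the object handling is elided). As documented above, objects may be
 * seen twice or missed when the walk races with a resize:
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	do {
 *		rhashtable_walk_start(&iter);
 *
 *		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj)) {
 *			// use obj; sleeping is not allowed here because
 *			// rhashtable_walk_start() takes the RCU read lock
 *		}
 *
 *		rhashtable_walk_stop(&iter);
 *	} while (obj == ERR_PTR(-EAGAIN));
 *	rhashtable_walk_exit(&iter);
 */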
949 
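/* Initial table sizing: scale the hint by 4/3 so the table starts below
 * the 75% grow threshold, round up to a power of two and never go below
 * the configured minimum. For example, nelem_hint = 100 gives
 * roundup_pow_of_two(133) = 256 buckets.
 */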
950 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
951 {
952 	size_t retsize;
953 
954 	if (params->nelem_hint)
955 		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
956 			      (unsigned long)params->min_size);
957 	else
958 		retsize = max(HASH_DEFAULT_SIZE,
959 			      (unsigned long)params->min_size);
960 
961 	return retsize;
962 }
963 
964 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
965 {
966 	return jhash2(key, length, seed);
967 }
968 
969 /**
970  * rhashtable_init - initialize a new hash table
971  * @ht:		hash table to be initialized
972  * @params:	configuration parameters
973  *
974  * Initializes a new hash table based on the provided configuration
975  * parameters. A table can be configured either with a variable or
976  * fixed length key:
977  *
978  * Configuration Example 1: Fixed length keys
979  * struct test_obj {
980  *	int			key;
981  *	void *			my_member;
982  *	struct rhash_head	node;
983  * };
984  *
985  * struct rhashtable_params params = {
986  *	.head_offset = offsetof(struct test_obj, node),
987  *	.key_offset = offsetof(struct test_obj, key),
988  *	.key_len = sizeof(int),
989  *	.hashfn = jhash,
990  * };
991  *
992  * Configuration Example 2: Variable length keys
993  * struct test_obj {
994  *	[...]
995  *	struct rhash_head	node;
996  * };
997  *
998  * u32 my_hash_fn(const void *data, u32 len, u32 seed)
999  * {
1000  *	struct test_obj *obj = data;
1001  *
1002  *	return [... hash ...];
1003  * }
1004  *
1005  * struct rhashtable_params params = {
1006  *	.head_offset = offsetof(struct test_obj, node),
1007  *	.hashfn = jhash,
1008  *	.obj_hashfn = my_hash_fn,
1009  * };
1010  */
1011 int rhashtable_init(struct rhashtable *ht,
1012 		    const struct rhashtable_params *params)
1013 {
1014 	struct bucket_table *tbl;
1015 	size_t size;
1016 
1017 	if ((!params->key_len && !params->obj_hashfn) ||
1018 	    (params->obj_hashfn && !params->obj_cmpfn))
1019 		return -EINVAL;
1020 
1021 	memset(ht, 0, sizeof(*ht));
1022 	mutex_init(&ht->mutex);
1023 	spin_lock_init(&ht->lock);
1024 	memcpy(&ht->p, params, sizeof(*params));
1025 
1026 	if (params->min_size)
1027 		ht->p.min_size = roundup_pow_of_two(params->min_size);
1028 
1029 	/* Cap total entries at 2^31 to avoid nelems overflow. */
1030 	ht->max_elems = 1u << 31;
1031 
1032 	if (params->max_size) {
1033 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
1034 		if (ht->p.max_size < ht->max_elems / 2)
1035 			ht->max_elems = ht->p.max_size * 2;
1036 	}
1037 
1038 	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1039 
1040 	size = rounded_hashtable_size(&ht->p);
1041 
1042 	ht->key_len = ht->p.key_len;
1043 	if (!params->hashfn) {
1044 		ht->p.hashfn = jhash;
1045 
1046 		if (!(ht->key_len & (sizeof(u32) - 1))) {
1047 			ht->key_len /= sizeof(u32);
1048 			ht->p.hashfn = rhashtable_jhash2;
1049 		}
1050 	}
1051 
1052 	/*
1053 	 * This is API initialization and thus we need to guarantee the
1054 	 * initial rhashtable allocation. Upon failure, retry with the
1055 	 * smallest possible size with __GFP_NOFAIL semantics.
1056 	 */
1057 	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1058 	if (unlikely(tbl == NULL)) {
1059 		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1060 		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1061 	}
1062 
1063 	atomic_set(&ht->nelems, 0);
1064 
1065 	RCU_INIT_POINTER(ht->tbl, tbl);
1066 
1067 	INIT_WORK(&ht->run_work, rht_deferred_worker);
1068 
1069 	return 0;
1070 }
1071 EXPORT_SYMBOL_GPL(rhashtable_init);
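
/* A minimal end-to-end sketch building on Configuration Example 1 above.
 * This is illustrative only: "my_ht", "obj", "key" and the error handling
 * are made up, and the fast-path helpers are the inline wrappers declared
 * in <linux/rhashtable.h>:
 *
 *	static struct rhashtable my_ht;
 *	struct test_obj *obj, *found;
 *	int key, err;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&my_ht, &key, params);
 *	// found is only stable under RCU unless the object is refcounted
 *	rcu_read_unlock();
 *
 *	rhashtable_remove_fast(&my_ht, &obj->node, params);
 *	rhashtable_destroy(&my_ht);
 */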
1072 
1073 /**
1074  * rhltable_init - initialize a new hash list table
1075  * @hlt:	hash list table to be initialized
1076  * @params:	configuration parameters
1077  *
1078  * Initializes a new hash list table.
1079  *
1080  * See documentation for rhashtable_init.
1081  */
1082 int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
1083 {
1084 	int err;
1085 
1086 	err = rhashtable_init(&hlt->ht, params);
1087 	hlt->ht.rhlist = true;
1088 	return err;
1089 }
1090 EXPORT_SYMBOL_GPL(rhltable_init);
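
/* A minimal rhltable sketch (illustrative only; the names are made up).
 * Unlike a plain rhashtable, duplicate keys are allowed and a lookup
 * returns the chain of all entries with that key, to be walked under RCU:
 *
 *	struct my_obj {
 *		int			key;
 *		struct rhlist_head	node;
 *	};
 *	struct rhlist_head *list, *pos;
 *	struct my_obj *obj;
 *
 *	err = rhltable_init(&my_hlt, &params);
 *	err = rhltable_insert(&my_hlt, &obj->node, params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&my_hlt, &key, params);
 *	rhl_for_each_entry_rcu(obj, pos, list, node)
 *		pr_debug("key %d\n", obj->key);
 *	rcu_read_unlock();
 */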
1091 
1092 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1093 				void (*free_fn)(void *ptr, void *arg),
1094 				void *arg)
1095 {
1096 	struct rhlist_head *list;
1097 
1098 	if (!ht->rhlist) {
1099 		free_fn(rht_obj(ht, obj), arg);
1100 		return;
1101 	}
1102 
1103 	list = container_of(obj, struct rhlist_head, rhead);
1104 	do {
1105 		obj = &list->rhead;
1106 		list = rht_dereference(list->next, ht);
1107 		free_fn(rht_obj(ht, obj), arg);
1108 	} while (list);
1109 }
1110 
1111 /**
1112  * rhashtable_free_and_destroy - free elements and destroy hash table
1113  * @ht:		the hash table to destroy
1114  * @free_fn:	callback to release resources of element
1115  * @arg:	pointer passed to free_fn
1116  *
1117  * Stops any pending async resize. If defined, invokes free_fn for each
1118  * element to release its resources. Please note that RCU protected
1119  * readers may still be accessing the elements, so resources must be
1120  * released in a manner compatible with that. Then frees the bucket array.
1121  *
1122  * This function may sleep while waiting for an async resize
1123  * to complete. The caller is responsible for ensuring that no further
1124  * write operations occur in parallel.
1125  */
1126 void rhashtable_free_and_destroy(struct rhashtable *ht,
1127 				 void (*free_fn)(void *ptr, void *arg),
1128 				 void *arg)
1129 {
1130 	struct bucket_table *tbl, *next_tbl;
1131 	unsigned int i;
1132 
1133 	cancel_work_sync(&ht->run_work);
1134 
1135 	mutex_lock(&ht->mutex);
1136 	tbl = rht_dereference(ht->tbl, ht);
1137 restart:
1138 	if (free_fn) {
1139 		for (i = 0; i < tbl->size; i++) {
1140 			struct rhash_head *pos, *next;
1141 
1142 			cond_resched();
1143 			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1144 			     next = !rht_is_a_nulls(pos) ?
1145 					rht_dereference(pos->next, ht) : NULL;
1146 			     !rht_is_a_nulls(pos);
1147 			     pos = next,
1148 			     next = !rht_is_a_nulls(pos) ?
1149 					rht_dereference(pos->next, ht) : NULL)
1150 				rhashtable_free_one(ht, pos, free_fn, arg);
1151 		}
1152 	}
1153 
1154 	next_tbl = rht_dereference(tbl->future_tbl, ht);
1155 	bucket_table_free(tbl);
1156 	if (next_tbl) {
1157 		tbl = next_tbl;
1158 		goto restart;
1159 	}
1160 	mutex_unlock(&ht->mutex);
1161 }
1162 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
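
/* A teardown sketch with a per-element callback (illustrative only;
 * "my_obj_free" and "my_ht" are made-up names, and struct test_obj is
 * assumed to carry a struct rcu_head named "rcu"). Because RCU readers
 * may still hold references, the callback defers the actual free:
 *
 *	static void my_obj_free(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		kfree_rcu(obj, rcu);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, my_obj_free, NULL);
 */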
1163 
1164 void rhashtable_destroy(struct rhashtable *ht)
1165 {
1166 	return rhashtable_free_and_destroy(ht, NULL, NULL);
1167 }
1168 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1169 
1170 struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
1171 					     unsigned int hash)
1172 {
1173 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1174 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1175 	unsigned int size = tbl->size >> tbl->nest;
1176 	unsigned int subhash = hash;
1177 	union nested_table *ntbl;
1178 
1179 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1180 	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1181 	subhash >>= tbl->nest;
1182 
1183 	while (ntbl && size > (1 << shift)) {
1184 		index = subhash & ((1 << shift) - 1);
1185 		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1186 						  tbl, hash);
1187 		size >>= shift;
1188 		subhash >>= shift;
1189 	}
1190 
1191 	if (!ntbl)
1192 		return NULL;
1193 
1194 	return &ntbl[subhash].bucket;
1195 
1196 }
1197 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
1198 
1199 struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
1200 					   unsigned int hash)
1201 {
1202 	static struct rhash_lock_head *rhnull;
1203 
1204 	if (!rhnull)
1205 		INIT_RHT_NULLS_HEAD(rhnull);
1206 	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1207 }
1208 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1209 
1210 struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
1211 						  struct bucket_table *tbl,
1212 						  unsigned int hash)
1213 {
1214 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1215 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1216 	unsigned int size = tbl->size >> tbl->nest;
1217 	union nested_table *ntbl;
1218 
1219 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1220 	hash >>= tbl->nest;
1221 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
1222 				  size <= (1 << shift));
1223 
1224 	while (ntbl && size > (1 << shift)) {
1225 		index = hash & ((1 << shift) - 1);
1226 		size >>= shift;
1227 		hash >>= shift;
1228 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
1229 					  size <= (1 << shift));
1230 	}
1231 
1232 	if (!ntbl)
1233 		return NULL;
1234 
1235 	return &ntbl[hash].bucket;
1236 
1237 }
1238 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1239