/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);
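	/*
	 * Worked example (illustrative, assuming 8 possible CPUs and the
	 * default locks_mul of 32): roundup_pow_of_two(8 * 32) = 256 lock
	 * slots, which the cap above then reduces to 32 for a 64-bucket
	 * table.
	 */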

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		if (gfpflags_allow_blocking(gfp))
			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
		else
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	else
		tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per bucket locks, and concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
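
/*
 * Example of the full walk protocol, as an illustrative sketch only
 * ("struct test_obj" with member "node" and the table "ht" are
 * hypothetical; the walk functions are the ones defined above):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);	(-EAGAIN here just means "rewound")
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(resize seen, walk rewound)
 *			break;
 *		}
 *		... use obj under RCU protection ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */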

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
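
/*
 * Minimal end-to-end usage sketch (illustrative only; "struct test_obj",
 * "test_params" and "key" are hypothetical, and the fast-path helpers
 * are the inline functions from <linux/rhashtable.h>):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj, *found;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	found = rhashtable_lookup_fast(&ht, &key, test_params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *
 *	rhashtable_destroy(&ht);
 */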

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
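
/*
 * Usage sketch (illustrative; assumes an entry type carrying a
 * struct rhlist_head member "list_node" and a params struct
 * "test_params" defined elsewhere):
 *
 *	struct rhltable hlt;
 *
 *	err = rhltable_init(&hlt, &test_params);
 *	err = rhltable_insert(&hlt, &obj->list_node, test_params);
 *
 * Unlike a plain rhashtable, an rhltable chains entries that share a
 * key into one list behind a single hash slot, so duplicate keys are
 * permitted.
 */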

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
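
/*
 * Example teardown with a free callback (illustrative sketch; the
 * kfree-based callback assumes the elements were kmalloc'ed):
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */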

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
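
/*
 * Worked example of the nested geometry (assuming 64-bit pointers and
 * 4KiB pages): shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9,
 * so one page holds 512 pointers.  A table of 2^18 buckets then gets
 * tbl->nest == 9: the low 9 bits of the bucket hash index a 512-entry
 * top level and the next 9 bits index a 512-bucket leaf page.
 */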

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1158