xref: /linux/lib/rhashtable.c (revision fd6b56615696c2addca7b43c862b21a9a386c116)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Resizable, Scalable, Concurrent Hash Table
4  *
5  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
6  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
7  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
8  *
9  * Code partially derived from nft_hash
10  * Rewritten with rehash code from br_multicast plus single list
11  * pointer as suggested by Josh Triplett
12  */
13 
14 #include <linux/atomic.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/log2.h>
18 #include <linux/sched.h>
19 #include <linux/rculist.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26 #include <linux/err.h>
27 #include <linux/export.h>
28 
29 #define HASH_DEFAULT_SIZE	64UL
30 #define HASH_MIN_SIZE		4U
31 
32 union nested_table {
33 	union nested_table __rcu *table;
34 	struct rhash_lock_head __rcu *bucket;
35 };
36 
37 static u32 head_hashfn(struct rhashtable *ht,
38 		       const struct bucket_table *tbl,
39 		       const struct rhash_head *he)
40 {
41 	return rht_head_hashfn(ht, tbl, he, ht->p);
42 }
43 
44 #ifdef CONFIG_PROVE_LOCKING
45 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
46 
47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
48 {
49 	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
50 }
51 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
52 
53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
54 {
55 	if (!debug_locks)
56 		return 1;
57 	if (unlikely(tbl->nest))
58 		return 1;
59 	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
62 #else
63 #define ASSERT_RHT_MUTEX(HT)
64 #endif
65 
66 static inline union nested_table *nested_table_top(
67 	const struct bucket_table *tbl)
68 {
69 	/* The top-level bucket entry does not need RCU protection
70 	 * because it's set at the same time as tbl->nest.
71 	 */
72 	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
73 }
74 
75 static void nested_table_free(union nested_table *ntbl, unsigned int size)
76 {
77 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
78 	const unsigned int len = 1 << shift;
79 	unsigned int i;
80 
81 	ntbl = rcu_dereference_protected(ntbl->table, 1);
82 	if (!ntbl)
83 		return;
84 
85 	if (size > len) {
86 		size >>= shift;
87 		for (i = 0; i < len; i++)
88 			nested_table_free(ntbl + i, size);
89 	}
90 
91 	kfree(ntbl);
92 }
93 
94 static void nested_bucket_table_free(const struct bucket_table *tbl)
95 {
96 	unsigned int size = tbl->size >> tbl->nest;
97 	unsigned int len = 1 << tbl->nest;
98 	union nested_table *ntbl;
99 	unsigned int i;
100 
101 	ntbl = nested_table_top(tbl);
102 
103 	for (i = 0; i < len; i++)
104 		nested_table_free(ntbl + i, size);
105 
106 	kfree(ntbl);
107 }
108 
109 static void bucket_table_free(const struct bucket_table *tbl)
110 {
111 	if (tbl->nest)
112 		nested_bucket_table_free(tbl);
113 
114 	kvfree(tbl);
115 }
116 
117 static void bucket_table_free_atomic(const struct bucket_table *tbl)
118 {
119 	if (tbl->nest)
120 		nested_bucket_table_free(tbl);
121 
122 	kvfree_atomic(tbl);
123 }
124 
125 static void bucket_table_free_rcu(struct rcu_head *head)
126 {
127 	bucket_table_free(container_of(head, struct bucket_table, rcu));
128 }
129 
130 static union nested_table *nested_table_alloc(struct rhashtable *ht,
131 					      union nested_table __rcu **prev,
132 					      bool leaf)
133 {
134 	union nested_table *ntbl;
135 	int i;
136 
137 	ntbl = rcu_dereference(*prev);
138 	if (ntbl)
139 		return ntbl;
140 
141 	ntbl = alloc_hooks_tag(ht->alloc_tag,
142 			kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
143 
144 	if (ntbl && leaf) {
145 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
146 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
147 	}
148 
149 	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
150 		return ntbl;
151 	/* Raced with another thread. */
152 	kfree(ntbl);
153 	return rcu_dereference(*prev);
154 }
155 
156 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
157 						      size_t nbuckets,
158 						      gfp_t gfp)
159 {
160 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
161 	struct bucket_table *tbl;
162 	size_t size;
163 
164 	if (nbuckets < (1 << (shift + 1)))
165 		return NULL;
166 
167 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
168 
169 	tbl = alloc_hooks_tag(ht->alloc_tag,
170 			kmalloc_noprof(size, gfp|__GFP_ZERO));
171 	if (!tbl)
172 		return NULL;
173 
174 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
175 				false)) {
176 		kfree(tbl);
177 		return NULL;
178 	}
179 
180 	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
181 
182 	return tbl;
183 }
184 
185 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
186 					       size_t nbuckets,
187 					       gfp_t gfp)
188 {
189 	struct bucket_table *tbl = NULL;
190 	size_t size;
191 	int i;
192 	static struct lock_class_key __key;
193 
194 	tbl = alloc_hooks_tag(ht->alloc_tag,
195 			kvmalloc_node_align_noprof(struct_size(tbl, buckets, nbuckets),
196 					     1, gfp|__GFP_ZERO, NUMA_NO_NODE));
197 
198 	size = nbuckets;
199 
200 	if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
201 		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
202 		nbuckets = 0;
203 	}
204 
205 	if (tbl == NULL)
206 		return NULL;
207 
208 	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
209 
210 	tbl->size = size;
211 
212 	rcu_head_init(&tbl->rcu);
213 	INIT_LIST_HEAD(&tbl->walkers);
214 
215 	tbl->hash_rnd = get_random_u32();
216 
217 	for (i = 0; i < nbuckets; i++)
218 		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
219 
220 	return tbl;
221 }
222 
223 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
224 						  struct bucket_table *tbl)
225 {
226 	struct bucket_table *new_tbl;
227 
228 	do {
229 		new_tbl = tbl;
230 		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
231 	} while (tbl);
232 
233 	return new_tbl;
234 }
235 
236 static int rhashtable_rehash_one(struct rhashtable *ht,
237 				 struct rhash_lock_head __rcu **bkt,
238 				 unsigned int old_hash)
239 {
240 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
241 	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
242 	int err = -EAGAIN;
243 	struct rhash_head *head, *next, *entry;
244 	struct rhash_head __rcu **pprev = NULL;
245 	unsigned int new_hash;
246 	unsigned long flags;
247 
248 	if (new_tbl->nest)
249 		goto out;
250 
251 	err = -ENOENT;
252 
253 	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
254 			  old_tbl, old_hash) {
255 		err = 0;
256 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
257 
258 		if (rht_is_a_nulls(next))
259 			break;
260 
261 		pprev = &entry->next;
262 	}
263 
264 	if (err)
265 		goto out;
266 
267 	new_hash = head_hashfn(ht, new_tbl, entry);
268 
269 	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
270 				SINGLE_DEPTH_NESTING);
271 
272 	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
273 
274 	RCU_INIT_POINTER(entry->next, head);
275 
276 	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
277 
278 	if (pprev)
279 		rcu_assign_pointer(*pprev, next);
280 	else
281 		/* Need to preserve the bit lock. */
282 		rht_assign_locked(bkt, next);
283 
284 out:
285 	return err;
286 }
287 
288 static int rhashtable_rehash_chain(struct rhashtable *ht,
289 				    unsigned int old_hash)
290 {
291 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
292 	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
293 	unsigned long flags;
294 	int err;
295 
296 	if (!bkt)
297 		return 0;
298 	flags = rht_lock(old_tbl, bkt);
299 
300 	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
301 		;
302 
303 	if (err == -ENOENT)
304 		err = 0;
305 	rht_unlock(old_tbl, bkt, flags);
306 
307 	return err;
308 }
309 
310 static int rhashtable_rehash_attach(struct rhashtable *ht,
311 				    struct bucket_table *old_tbl,
312 				    struct bucket_table *new_tbl)
313 {
314 	/* Make insertions go into the new, empty table right away. Deletions
315 	 * and lookups will be attempted in both tables until we synchronize.
316 	 * As cmpxchg() provides strong barriers, we do not need
317 	 * rcu_assign_pointer().
318 	 */
319 
320 	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
321 		    new_tbl) != NULL)
322 		return -EEXIST;
323 
324 	return 0;
325 }
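
/*
 * Reader-side counterpart (editorial sketch, not part of the original file)
 * of the protocol described in rhashtable_rehash_attach() above: while a
 * rehash is in flight, a lookup that misses in the current table follows
 * future_tbl and retries, so an entry is found no matter which table it
 * currently lives in.  This mirrors __rhashtable_lookup() in
 * <linux/rhashtable.h>:
 *
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	do {
 *		[... search the bucket chain for the key in tbl ...]
 *		smp_rmb();	[ensure any newly attached table is visible]
 *		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 *	} while (tbl);
 *	[... not found ...]
 */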
326 
327 static int rhashtable_rehash_table(struct rhashtable *ht)
328 {
329 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
330 	struct bucket_table *new_tbl;
331 	struct rhashtable_walker *walker;
332 	unsigned int old_hash;
333 	int err;
334 
335 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
336 	if (!new_tbl)
337 		return 0;
338 
339 	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
340 		err = rhashtable_rehash_chain(ht, old_hash);
341 		if (err)
342 			return err;
343 		cond_resched();
344 	}
345 
346 	/* Publish the new table pointer. */
347 	rcu_assign_pointer(ht->tbl, new_tbl);
348 
349 	spin_lock(&ht->lock);
350 	list_for_each_entry(walker, &old_tbl->walkers, list)
351 		walker->tbl = NULL;
352 
353 	/* Wait for readers. All new readers will see the new
354 	 * table, and thus no references to the old table will
355 	 * remain.
356 	 * We do this inside the locked region so that
357 	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
358 	 * to detect that the table is being freed and must not be re-linked.
359 	 */
360 	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
361 	spin_unlock(&ht->lock);
362 
363 	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
364 }
365 
366 static int rhashtable_rehash_alloc(struct rhashtable *ht,
367 				   struct bucket_table *old_tbl,
368 				   unsigned int size)
369 	__must_hold(&ht->mutex)
370 {
371 	struct bucket_table *new_tbl;
372 	int err;
373 
374 	ASSERT_RHT_MUTEX(ht);
375 
376 	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
377 	if (new_tbl == NULL)
378 		return -ENOMEM;
379 
380 	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
381 	if (err)
382 		bucket_table_free(new_tbl);
383 
384 	return err;
385 }
386 
387 /**
388  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
389  * @ht:		the hash table to shrink
390  *
391  * This function shrinks the hash table to fit, i.e., to the smallest
392  * size that would not cause it to expand right away automatically.
393  *
394  * The caller must ensure that no concurrent resizing occurs by holding
395  * ht->mutex.
396  *
397  * The caller must ensure that no concurrent table mutations take place.
398  * It is however valid to have concurrent lookups if they are RCU protected.
399  *
400  * It is valid to have concurrent insertions and deletions protected by per
401  * bucket locks or concurrent RCU protected lookups and traversals.
402  */
403 static int rhashtable_shrink(struct rhashtable *ht)
404 	__must_hold(&ht->mutex)
405 {
406 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
407 	unsigned int nelems = atomic_read(&ht->nelems);
408 	unsigned int size = 0;
409 
410 	if (nelems)
411 		size = roundup_pow_of_two(nelems * 3 / 2);
412 	if (size < ht->p.min_size)
413 		size = ht->p.min_size;
414 
415 	if (old_tbl->size <= size)
416 		return 0;
417 
418 	if (rht_dereference(old_tbl->future_tbl, ht))
419 		return -EEXIST;
420 
421 	return rhashtable_rehash_alloc(ht, old_tbl, size);
422 }
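
/*
 * Worked sizing example (editorial addition): with nelems = 200 the target
 * is roundup_pow_of_two(200 * 3 / 2) = roundup_pow_of_two(300) = 512
 * buckets.  200 entries in 512 buckets is ~39% utilisation, well below the
 * 75% growth threshold, so the freshly shrunk table will not expand again
 * immediately.
 */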
423 
424 static void rht_deferred_worker(struct work_struct *work)
425 {
426 	struct rhashtable *ht;
427 	struct bucket_table *tbl;
428 	int err = 0;
429 
430 	ht = container_of(work, struct rhashtable, run_work);
431 	mutex_lock(&ht->mutex);
432 
433 	tbl = rht_dereference(ht->tbl, ht);
434 	tbl = rhashtable_last_table(ht, tbl);
435 
436 	if (rht_grow_above_75(ht, tbl))
437 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
438 	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
439 		err = rhashtable_shrink(ht);
440 	else if (tbl->nest)
441 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
442 
443 	if (!err || err == -EEXIST) {
444 		int nerr;
445 
446 		nerr = rhashtable_rehash_table(ht);
447 		err = err ?: nerr;
448 	}
449 
450 	mutex_unlock(&ht->mutex);
451 
452 	/*
453 	 * Re-arm via @run_work, not @run_irq_work.
454 	 * rhashtable_free_and_destroy() drains async work as irq_work_sync()
455 	 * followed by cancel_work_sync(). If this site queued irq_work while
456 	 * cancel_work_sync() was waiting for us, irq_work_sync() would already
457 	 * have returned and the stale irq_work could fire post-teardown.
458 	 * cancel_work_sync() natively handles self-requeue on @run_work.
459 	 */
460 	if (err)
461 		schedule_work(&ht->run_work);
462 }
463 
464 /*
465  * Insert-path callers can run under a raw spinlock (e.g. an insecure_elasticity
466  * user). Calling schedule_work() under that lock records caller_lock ->
467  * pool->lock -> pi_lock -> rq->__lock, closing a locking cycle if any of
468  * these is acquired in the reverse direction elsewhere. Bounce through
469  * irq_work so the schedule_work() runs with the caller's lock no longer held.
470  */
471 static void rht_deferred_irq_work(struct irq_work *irq_work)
472 {
473 	struct rhashtable *ht = container_of(irq_work, struct rhashtable,
474 					     run_irq_work);
475 
476 	schedule_work(&ht->run_work);
477 }
478 
479 static int rhashtable_insert_rehash(struct rhashtable *ht,
480 				    struct bucket_table *tbl)
481 {
482 	struct bucket_table *old_tbl;
483 	struct bucket_table *new_tbl;
484 	unsigned int size;
485 	int err;
486 
487 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
488 
489 	size = tbl->size;
490 
491 	err = -EBUSY;
492 
493 	if (rht_grow_above_75(ht, tbl))
494 		size *= 2;
495 	/* Do not schedule more than one rehash */
496 	else if (old_tbl != tbl)
497 		goto fail;
498 
499 	err = -ENOMEM;
500 
501 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
502 	if (new_tbl == NULL)
503 		goto fail;
504 
505 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
506 	if (err) {
507 		bucket_table_free_atomic(new_tbl);
508 		if (err == -EEXIST)
509 			err = 0;
510 	} else
511 		irq_work_queue(&ht->run_irq_work);
512 
513 	return err;
514 
515 fail:
516 	/* Do not fail the insert if someone else did a rehash. */
517 	if (likely(rcu_access_pointer(tbl->future_tbl)))
518 		return 0;
519 
520 	/* Schedule async rehash to retry allocation in process context. */
521 	if (err == -ENOMEM)
522 		irq_work_queue(&ht->run_irq_work);
523 
524 	return err;
525 }
526 
527 static void *rhashtable_lookup_one(struct rhashtable *ht,
528 				   struct rhash_lock_head __rcu **bkt,
529 				   struct bucket_table *tbl, unsigned int hash,
530 				   const void *key, struct rhash_head *obj)
531 {
532 	struct rhashtable_compare_arg arg = {
533 		.ht = ht,
534 		.key = key,
535 	};
536 	struct rhash_head __rcu **pprev = NULL;
537 	struct rhash_head *head;
538 	int elasticity;
539 
540 	elasticity = RHT_ELASTICITY;
541 	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
542 		struct rhlist_head *list;
543 		struct rhlist_head *plist;
544 
545 		elasticity--;
546 		if (!key ||
547 		    (ht->p.obj_cmpfn ?
548 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
549 		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
550 			pprev = &head->next;
551 			continue;
552 		}
553 
554 		if (!ht->rhlist)
555 			return rht_obj(ht, head);
556 
557 		list = container_of(obj, struct rhlist_head, rhead);
558 		plist = container_of(head, struct rhlist_head, rhead);
559 
560 		RCU_INIT_POINTER(list->next, plist);
561 		head = rht_dereference_bucket(head->next, tbl, hash);
562 		RCU_INIT_POINTER(list->rhead.next, head);
563 		if (pprev)
564 			rcu_assign_pointer(*pprev, obj);
565 		else
566 			/* Need to preserve the bit lock */
567 			rht_assign_locked(bkt, obj);
568 
569 		return NULL;
570 	}
571 
572 	if (elasticity <= 0 && !ht->p.insecure_elasticity)
573 		return ERR_PTR(-EAGAIN);
574 
575 	return ERR_PTR(-ENOENT);
576 }
577 
578 static struct bucket_table *rhashtable_insert_one(
579 	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
580 	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
581 	void *data)
582 {
583 	struct bucket_table *new_tbl;
584 	struct rhash_head *head;
585 
586 	if (!IS_ERR_OR_NULL(data))
587 		return ERR_PTR(-EEXIST);
588 
589 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
590 		return ERR_CAST(data);
591 
592 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
593 	if (new_tbl)
594 		return new_tbl;
595 
596 	if (PTR_ERR(data) != -ENOENT)
597 		return ERR_CAST(data);
598 
599 	if (unlikely(rht_grow_above_max(ht, tbl)))
600 		return ERR_PTR(-E2BIG);
601 
602 	if (unlikely(rht_grow_above_100(ht, tbl)) &&
603 	    !ht->p.insecure_elasticity)
604 		return ERR_PTR(-EAGAIN);
605 
606 	head = rht_ptr(bkt, tbl, hash);
607 
608 	RCU_INIT_POINTER(obj->next, head);
609 	if (ht->rhlist) {
610 		struct rhlist_head *list;
611 
612 		list = container_of(obj, struct rhlist_head, rhead);
613 		RCU_INIT_POINTER(list->next, NULL);
614 	}
615 
616 	/* bkt is always the head of the list, so it holds
617 	 * the lock, which we need to preserve
618 	 */
619 	rht_assign_locked(bkt, obj);
620 
621 	return NULL;
622 }
623 
624 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
625 				   struct rhash_head *obj)
626 {
627 	struct bucket_table *new_tbl;
628 	struct bucket_table *tbl;
629 	struct rhash_lock_head __rcu **bkt;
630 	unsigned long flags;
631 	unsigned int hash;
632 	void *data;
633 
634 	new_tbl = rcu_dereference(ht->tbl);
635 
636 	do {
637 		tbl = new_tbl;
638 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
639 		if (rcu_access_pointer(tbl->future_tbl))
640 			/* Failure is OK */
641 			bkt = rht_bucket_var(tbl, hash);
642 		else
643 			bkt = rht_bucket_insert(ht, tbl, hash);
644 		if (bkt == NULL) {
645 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
646 			data = ERR_PTR(-EAGAIN);
647 		} else {
648 			bool inserted;
649 
650 			flags = rht_lock(tbl, bkt);
651 			data = rhashtable_lookup_one(ht, bkt, tbl,
652 						     hash, key, obj);
653 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
654 							hash, obj, data);
655 			inserted = data && !new_tbl;
656 			if (inserted)
657 				atomic_inc(&ht->nelems);
658 			if (PTR_ERR(new_tbl) != -EEXIST)
659 				data = ERR_CAST(new_tbl);
660 
661 			rht_unlock(tbl, bkt, flags);
662 
663 			if (inserted && rht_grow_above_75(ht, tbl))
664 				irq_work_queue(&ht->run_irq_work);
665 		}
666 	} while (!IS_ERR_OR_NULL(new_tbl));
667 
668 	if (PTR_ERR(data) == -EAGAIN)
669 		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
670 			       -EAGAIN);
671 
672 	return data;
673 }
674 
675 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
676 			     struct rhash_head *obj)
677 {
678 	void *data;
679 
680 	do {
681 		rcu_read_lock();
682 		data = rhashtable_try_insert(ht, key, obj);
683 		rcu_read_unlock();
684 	} while (PTR_ERR(data) == -EAGAIN);
685 
686 	return data;
687 }
688 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
689 
690 /**
691  * rhashtable_walk_enter - Initialise an iterator
692  * @ht:		Table to walk over
693  * @iter:	Hash table Iterator
694  *
695  * This function prepares a hash table walk.
696  *
697  * Note that if you restart a walk after rhashtable_walk_stop you
698  * may see the same object twice.  Also, you may miss objects if
699  * there are removals in between rhashtable_walk_stop and the next
700  * call to rhashtable_walk_start.
701  *
702  * For a completely stable walk you should construct your own data
703  * structure outside the hash table.
704  *
705  * This function may be called from any process context, including
706  * non-preemptible context, but cannot be called from softirq or
707  * hardirq context.
708  *
709  * You must call rhashtable_walk_exit after this function returns.
710  */
711 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
712 {
713 	iter->ht = ht;
714 	iter->p = NULL;
715 	iter->slot = 0;
716 	iter->skip = 0;
717 	iter->end_of_table = 0;
718 
719 	spin_lock(&ht->lock);
720 	iter->walker.tbl =
721 		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
722 	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
723 	spin_unlock(&ht->lock);
724 }
725 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
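
/*
 * Illustrative walk sketch (editorial addition, not part of the original
 * file).  It assumes a table 'ht' that was initialised with objects
 * embedding 'struct rhash_head node', as in the rhashtable_init() example
 * further down.  An ERR_PTR(-EAGAIN) from rhashtable_walk_next() only means
 * a resize rewound the iterator; the walk simply continues.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		[... use obj; entries may be seen twice or missed across
 *		     stop/start cycles, see rhashtable_walk_enter() above ...]
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */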
726 
727 /**
728  * rhashtable_walk_exit - Free an iterator
729  * @iter:	Hash table Iterator
730  *
731  * This function frees resources allocated by rhashtable_walk_enter.
732  */
733 void rhashtable_walk_exit(struct rhashtable_iter *iter)
734 {
735 	spin_lock(&iter->ht->lock);
736 	if (iter->walker.tbl)
737 		list_del(&iter->walker.list);
738 	spin_unlock(&iter->ht->lock);
739 }
740 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
741 
742 /**
743  * rhashtable_walk_start_check - Start a hash table walk
744  * @iter:	Hash table iterator
745  *
746  * Start a hash table walk at the current iterator position.  Note that we take
747  * the RCU lock in all cases including when we return an error.  So you must
748  * always call rhashtable_walk_stop to clean up.
749  *
750  * Returns zero if successful.
751  *
752  * Returns -EAGAIN if resize event occurred.  Note that the iterator
753  * will rewind back to the beginning and you may use it immediately
754  * by calling rhashtable_walk_next.
755  *
756  * rhashtable_walk_start is defined as an inline variant that returns
757  * void. This is preferred in cases where the caller would ignore
758  * resize events and always continue.
759  */
760 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
761 	__acquires_shared(RCU)
762 {
763 	struct rhashtable *ht = iter->ht;
764 	bool rhlist = ht->rhlist;
765 
766 	rcu_read_lock();
767 
768 	spin_lock(&ht->lock);
769 	if (iter->walker.tbl)
770 		list_del(&iter->walker.list);
771 	spin_unlock(&ht->lock);
772 
773 	if (iter->end_of_table)
774 		return 0;
775 	if (!iter->walker.tbl) {
776 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
777 		iter->slot = 0;
778 		iter->skip = 0;
779 		return -EAGAIN;
780 	}
781 
782 	if (iter->p && !rhlist) {
783 		/*
784 		 * We need to validate that 'p' is still in the table, and
785 		 * if so, update 'skip'
786 		 */
787 		struct rhash_head *p;
788 		int skip = 0;
789 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
790 			skip++;
791 			if (p == iter->p) {
792 				iter->skip = skip;
793 				goto found;
794 			}
795 		}
796 		iter->p = NULL;
797 	} else if (iter->p && rhlist) {
798 		/* Need to validate that 'list' is still in the table, and
799 		 * if so, update 'skip' and 'p'.
800 		 */
801 		struct rhash_head *p;
802 		struct rhlist_head *list;
803 		int skip = 0;
804 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
805 			for (list = container_of(p, struct rhlist_head, rhead);
806 			     list;
807 			     list = rcu_dereference(list->next)) {
808 				skip++;
809 				if (list == iter->list) {
810 					iter->p = p;
811 					iter->skip = skip;
812 					goto found;
813 				}
814 			}
815 		}
816 		iter->p = NULL;
817 	}
818 found:
819 	return 0;
820 }
821 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
822 
823 /**
824  * __rhashtable_walk_find_next - Find the next element in a table (or the first
825  * one in case of a new walk).
826  *
827  * @iter:	Hash table iterator
828  *
829  * Returns the found object or NULL when the end of the table is reached.
830  *
831  * Returns -EAGAIN if resize event occurred.
832  */
833 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
834 {
835 	struct bucket_table *tbl = iter->walker.tbl;
836 	struct rhlist_head *list = iter->list;
837 	struct rhashtable *ht = iter->ht;
838 	struct rhash_head *p = iter->p;
839 	bool rhlist = ht->rhlist;
840 
841 	if (!tbl)
842 		return NULL;
843 
844 	for (; iter->slot < tbl->size; iter->slot++) {
845 		int skip = iter->skip;
846 
847 		rht_for_each_rcu(p, tbl, iter->slot) {
848 			if (rhlist) {
849 				list = container_of(p, struct rhlist_head,
850 						    rhead);
851 				do {
852 					if (!skip)
853 						goto next;
854 					skip--;
855 					list = rcu_dereference(list->next);
856 				} while (list);
857 
858 				continue;
859 			}
860 			if (!skip)
861 				break;
862 			skip--;
863 		}
864 
865 next:
866 		if (!rht_is_a_nulls(p)) {
867 			iter->skip++;
868 			iter->p = p;
869 			iter->list = list;
870 			return rht_obj(ht, rhlist ? &list->rhead : p);
871 		}
872 
873 		iter->skip = 0;
874 	}
875 
876 	iter->p = NULL;
877 
878 	/* Ensure we see any new tables. */
879 	smp_rmb();
880 
881 	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
882 	if (iter->walker.tbl) {
883 		iter->slot = 0;
884 		iter->skip = 0;
885 		return ERR_PTR(-EAGAIN);
886 	} else {
887 		iter->end_of_table = true;
888 	}
889 
890 	return NULL;
891 }
892 
893 /**
894  * rhashtable_walk_next - Return the next object and advance the iterator
895  * @iter:	Hash table iterator
896  *
897  * Note that you must call rhashtable_walk_stop when you are finished
898  * with the walk.
899  *
900  * Returns the next object or NULL when the end of the table is reached.
901  *
902  * Returns -EAGAIN if resize event occurred.  Note that the iterator
903  * will rewind back to the beginning and you may continue to use it.
904  */
905 void *rhashtable_walk_next(struct rhashtable_iter *iter)
906 {
907 	struct rhlist_head *list = iter->list;
908 	struct rhashtable *ht = iter->ht;
909 	struct rhash_head *p = iter->p;
910 	bool rhlist = ht->rhlist;
911 
912 	if (p) {
913 		if (!rhlist || !(list = rcu_dereference(list->next))) {
914 			p = rcu_dereference(p->next);
915 			list = container_of(p, struct rhlist_head, rhead);
916 		}
917 		if (!rht_is_a_nulls(p)) {
918 			iter->skip++;
919 			iter->p = p;
920 			iter->list = list;
921 			return rht_obj(ht, rhlist ? &list->rhead : p);
922 		}
923 
924 		/* At the end of this slot, switch to the next one and then find
925 		 * the next entry from that point.
926 		 */
927 		iter->skip = 0;
928 		iter->slot++;
929 	}
930 
931 	return __rhashtable_walk_find_next(iter);
932 }
933 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
934 
935 /**
936  * rhashtable_walk_peek - Return the next object but don't advance the iterator
937  * @iter:	Hash table iterator
938  *
939  * Returns the next object or NULL when the end of the table is reached.
940  *
941  * Returns -EAGAIN if resize event occurred.  Note that the iterator
942  * will rewind back to the beginning and you may continue to use it.
943  */
944 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
945 {
946 	struct rhlist_head *list = iter->list;
947 	struct rhashtable *ht = iter->ht;
948 	struct rhash_head *p = iter->p;
949 
950 	if (p)
951 		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
952 
953 	/* No object found in current iter, find next one in the table. */
954 
955 	if (iter->skip) {
956 		/* A nonzero skip value points to the next entry in the table
957 		 * beyond that last one that was found. Decrement skip so
958 		 * beyond the last one that was found. Decrement skip so
959 		 * will restore the original value of skip assuming that
960 		 * the table hasn't changed.
961 		 */
962 		iter->skip--;
963 	}
964 
965 	return __rhashtable_walk_find_next(iter);
966 }
967 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
968 
969 /**
970  * rhashtable_walk_stop - Finish a hash table walk
971  * @iter:	Hash table iterator
972  *
973  * Finish a hash table walk.  Does not reset the iterator to the start of the
974  * hash table.
975  */
976 void rhashtable_walk_stop(struct rhashtable_iter *iter)
977 {
978 	struct rhashtable *ht;
979 	struct bucket_table *tbl = iter->walker.tbl;
980 
981 	if (!tbl)
982 		goto out;
983 
984 	ht = iter->ht;
985 
986 	spin_lock(&ht->lock);
987 	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
988 		/* This bucket table is being freed, don't re-link it. */
989 		iter->walker.tbl = NULL;
990 	else
991 		list_add(&iter->walker.list, &tbl->walkers);
992 	spin_unlock(&ht->lock);
993 
994 out:
995 	rcu_read_unlock();
996 }
997 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
998 
999 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
1000 {
1001 	size_t retsize;
1002 
1003 	if (params->nelem_hint)
1004 		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
1005 			      (unsigned long)params->min_size);
1006 	else
1007 		retsize = max(HASH_DEFAULT_SIZE,
1008 			      (unsigned long)params->min_size);
1009 
1010 	return retsize;
1011 }
1012 
1013 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
1014 {
1015 	return jhash2(key, length, seed);
1016 }
1017 
1018 /**
1019  * rhashtable_init - initialize a new hash table
1020  * @ht:		hash table to be initialized
1021  * @params:	configuration parameters
1022  *
1023  * Initializes a new hash table based on the provided configuration
1024  * parameters. A table can be configured either with a variable or
1025  * fixed length key:
1026  *
1027  * Configuration Example 1: Fixed length keys
1028  * struct test_obj {
1029  *	int			key;
1030  *	void *			my_member;
1031  *	struct rhash_head	node;
1032  * };
1033  *
1034  * struct rhashtable_params params = {
1035  *	.head_offset = offsetof(struct test_obj, node),
1036  *	.key_offset = offsetof(struct test_obj, key),
1037  *	.key_len = sizeof(int),
1038  *	.hashfn = jhash,
1039  * };
1040  *
1041  * Configuration Example 2: Variable length keys
1042  * struct test_obj {
1043  *	[...]
1044  *	struct rhash_head	node;
1045  * };
1046  *
1047  * u32 my_hash_fn(const void *data, u32 len, u32 seed)
1048  * {
1049  *	struct test_obj *obj = data;
1050  *
1051  *	return [... hash ...];
1052  * }
1053  *
1054  * struct rhashtable_params params = {
1055  *	.head_offset = offsetof(struct test_obj, node),
1056  *	.hashfn = jhash,
1057  *	.obj_hashfn = my_hash_fn,
1058  * };
1059  */
1060 int rhashtable_init_noprof(struct rhashtable *ht,
1061 		    const struct rhashtable_params *params)
1062 {
1063 	struct bucket_table *tbl;
1064 	size_t size;
1065 
1066 	if ((!params->key_len && !params->obj_hashfn) ||
1067 	    (params->obj_hashfn && !params->obj_cmpfn))
1068 		return -EINVAL;
1069 
1070 	memset(ht, 0, sizeof(*ht));
1071 	mutex_init(&ht->mutex);
1072 	spin_lock_init(&ht->lock);
1073 	memcpy(&ht->p, params, sizeof(*params));
1074 
1075 	alloc_tag_record(ht->alloc_tag);
1076 
1077 	if (params->min_size)
1078 		ht->p.min_size = roundup_pow_of_two(params->min_size);
1079 
1080 	/* Cap total entries at 2^31 to avoid nelems overflow. */
1081 	ht->max_elems = 1u << 31;
1082 
1083 	if (params->max_size) {
1084 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
1085 		if (ht->p.max_size < ht->max_elems / 2)
1086 			ht->max_elems = ht->p.max_size * 2;
1087 	}
1088 
1089 	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1090 
1091 	size = rounded_hashtable_size(&ht->p);
1092 
1093 	ht->key_len = ht->p.key_len;
1094 	if (!params->hashfn) {
1095 		ht->p.hashfn = jhash;
1096 
1097 		if (!(ht->key_len & (sizeof(u32) - 1))) {
1098 			ht->key_len /= sizeof(u32);
1099 			ht->p.hashfn = rhashtable_jhash2;
1100 		}
1101 	}
1102 
1103 	/*
1104 	 * This is API initialization and thus we need to guarantee the
1105 	 * initial rhashtable allocation. Upon failure, retry with the
1106 	 * smallest possible size with __GFP_NOFAIL semantics.
1107 	 */
1108 	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1109 	if (unlikely(tbl == NULL)) {
1110 		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1111 		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1112 	}
1113 
1114 	atomic_set(&ht->nelems, 0);
1115 
1116 	RCU_INIT_POINTER(ht->tbl, tbl);
1117 
1118 	INIT_WORK(&ht->run_work, rht_deferred_worker);
1119 	init_irq_work(&ht->run_irq_work, rht_deferred_irq_work);
1120 
1121 	return 0;
1122 }
1123 EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
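
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file).  It reuses 'struct test_obj' and 'params' from the fixed-length-key
 * kernel-doc example above; error handling is elided and callers normally go
 * through the rhashtable_init() wrapper rather than the _noprof variant.
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj, *found;
 *	int key = 42;
 *
 *	rhashtable_init(&ht, &params);
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&ht, &key, params);
 *	rcu_read_unlock();
 *
 *	rhashtable_remove_fast(&ht, &obj->node, params);
 *	rhashtable_destroy(&ht);
 */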
1124 
1125 /**
1126  * rhltable_init - initialize a new hash list table
1127  * @hlt:	hash list table to be initialized
1128  * @params:	configuration parameters
1129  *
1130  * Initializes a new hash list table.
1131  *
1132  * See documentation for rhashtable_init.
1133  */
1134 int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
1135 {
1136 	int err;
1137 
1138 	err = rhashtable_init_noprof(&hlt->ht, params);
1139 	hlt->ht.rhlist = true;
1140 	return err;
1141 }
1142 EXPORT_SYMBOL_GPL(rhltable_init_noprof);
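
/*
 * Illustrative rhlist usage sketch (editorial addition; the structure and
 * parameter names are hypothetical).  Objects that may share a key embed a
 * 'struct rhlist_head' instead of a 'struct rhash_head', and a lookup yields
 * the whole chain of entries with that key.
 *
 *	struct mt_obj {
 *		int			key;
 *		struct rhlist_head	node;
 *	};
 *
 *	const struct rhashtable_params mt_params = {
 *		.head_offset = offsetof(struct mt_obj, node),
 *		.key_offset  = offsetof(struct mt_obj, key),
 *		.key_len     = sizeof(int),
 *	};
 *
 *	rhltable_init(&hlt, &mt_params);
 *	rhltable_insert(&hlt, &obj->node, mt_params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, mt_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, node)
 *		[... every object sharing 'key' ...];
 *	rcu_read_unlock();
 */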
1143 
1144 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1145 				void (*free_fn)(void *ptr, void *arg),
1146 				void *arg)
1147 {
1148 	struct rhlist_head *list;
1149 
1150 	if (!ht->rhlist) {
1151 		free_fn(rht_obj(ht, obj), arg);
1152 		return;
1153 	}
1154 
1155 	list = container_of(obj, struct rhlist_head, rhead);
1156 	do {
1157 		obj = &list->rhead;
1158 		list = rht_dereference(list->next, ht);
1159 		free_fn(rht_obj(ht, obj), arg);
1160 	} while (list);
1161 }
1162 
1163 /**
1164  * rhashtable_free_and_destroy - free elements and destroy hash table
1165  * @ht:		the hash table to destroy
1166  * @free_fn:	callback to release resources of element
1167  * @arg:	pointer passed to free_fn
1168  *
1169  * Stops any pending async resize. If defined, invokes free_fn for each
1170  * element to release resources. Please note that RCU protected
1171  * readers may still be accessing the elements. Releasing of resources
1172  * must occur in a compatible manner. Then frees the bucket array.
1173  *
1174  * This function will eventually sleep to wait for an async resize
1175  * to complete. The caller is responsible for ensuring that no further
1176  * write operations occur in parallel.
1177  *
1178  * After cancel_work_sync() has returned, the deferred rehash worker is
1179  * quiesced and, per the contract above, no other concurrent access to the
1180  * rhashtable is possible. The tables are therefore owned exclusively by
1181  * this function and can be walked without ht->mutex held.
1182  */
1183 void rhashtable_free_and_destroy(struct rhashtable *ht,
1184 				 void (*free_fn)(void *ptr, void *arg),
1185 				 void *arg)
1186 {
1187 	struct bucket_table *tbl, *next_tbl;
1188 	unsigned int i;
1189 
1190 	irq_work_sync(&ht->run_irq_work);
1191 	cancel_work_sync(&ht->run_work);
1192 
1193 	/*
1194 	 * Do NOT take ht->mutex here. The rehash worker establishes
1195 	 * ht->mutex -> fs_reclaim via GFP_KERNEL bucket allocation under
1196 	 * the mutex; callers on the reclaim path (e.g. simple_xattr_ht_free()
1197 	 * from evict() under the dcache shrinker for shmem/kernfs/pidfs
1198 	 * inodes) would otherwise close a circular dependency
1199 	 * fs_reclaim -> ht->mutex.
1200 	 */
1201 	tbl = rcu_dereference_raw(ht->tbl);
1202 restart:
1203 	if (free_fn) {
1204 		for (i = 0; i < tbl->size; i++) {
1205 			struct rhash_head *pos, *next;
1206 
1207 			cond_resched();
1208 			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1209 			     next = !rht_is_a_nulls(pos) ?
1210 					rcu_dereference_raw(pos->next) : NULL;
1211 			     !rht_is_a_nulls(pos);
1212 			     pos = next,
1213 			     next = !rht_is_a_nulls(pos) ?
1214 					rcu_dereference_raw(pos->next) : NULL)
1215 				rhashtable_free_one(ht, pos, free_fn, arg);
1216 		}
1217 	}
1218 
1219 	next_tbl = rcu_dereference_raw(tbl->future_tbl);
1220 	bucket_table_free(tbl);
1221 	if (next_tbl) {
1222 		tbl = next_tbl;
1223 		goto restart;
1224 	}
1225 }
1226 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
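
/*
 * Illustrative teardown sketch (editorial addition): free_fn receives each
 * remaining object, so a plain kfree() callback is usually enough once no
 * RCU readers can still hold references; otherwise defer the freeing, e.g.
 * via kfree_rcu().
 *
 *	static void test_obj_free(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, test_obj_free, NULL);
 */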
1227 
1228 void rhashtable_destroy(struct rhashtable *ht)
1229 {
1230 	return rhashtable_free_and_destroy(ht, NULL, NULL);
1231 }
1232 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1233 
1234 struct rhash_lock_head __rcu **__rht_bucket_nested(
1235 	const struct bucket_table *tbl, unsigned int hash)
1236 {
1237 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1238 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1239 	unsigned int size = tbl->size >> tbl->nest;
1240 	unsigned int subhash = hash;
1241 	union nested_table *ntbl;
1242 
1243 	ntbl = nested_table_top(tbl);
1244 	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1245 	subhash >>= tbl->nest;
1246 
1247 	while (ntbl && size > (1 << shift)) {
1248 		index = subhash & ((1 << shift) - 1);
1249 		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1250 						  tbl, hash);
1251 		size >>= shift;
1252 		subhash >>= shift;
1253 	}
1254 
1255 	if (!ntbl)
1256 		return NULL;
1257 
1258 	return &ntbl[subhash].bucket;
1259 
1260 }
1261 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
1262 
1263 struct rhash_lock_head __rcu **rht_bucket_nested(
1264 	const struct bucket_table *tbl, unsigned int hash)
1265 {
1266 	static struct rhash_lock_head __rcu *rhnull;
1267 
1268 	if (!rhnull)
1269 		INIT_RHT_NULLS_HEAD(rhnull);
1270 	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1271 }
1272 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1273 
1274 struct rhash_lock_head __rcu **rht_bucket_nested_insert(
1275 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
1276 {
1277 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1278 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1279 	unsigned int size = tbl->size >> tbl->nest;
1280 	union nested_table *ntbl;
1281 
1282 	ntbl = nested_table_top(tbl);
1283 	hash >>= tbl->nest;
1284 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
1285 				  size <= (1 << shift));
1286 
1287 	while (ntbl && size > (1 << shift)) {
1288 		index = hash & ((1 << shift) - 1);
1289 		size >>= shift;
1290 		hash >>= shift;
1291 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
1292 					  size <= (1 << shift));
1293 	}
1294 
1295 	if (!ntbl)
1296 		return NULL;
1297 
1298 	return &ntbl[hash].bucket;
1299 
1300 }
1301 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1302