xref: /linux/lib/rhashtable.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Resizable, Scalable, Concurrent Hash Table
4  *
5  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
6  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
7  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
8  *
9  * Code partially derived from nft_hash
10  * Rewritten with rehash code from br_multicast plus single list
11  * pointer as suggested by Josh Triplett
12  */
13 
14 #include <linux/atomic.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/log2.h>
18 #include <linux/sched.h>
19 #include <linux/rculist.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26 #include <linux/err.h>
27 #include <linux/export.h>
28 
29 #define HASH_DEFAULT_SIZE	64UL
30 #define HASH_MIN_SIZE		4U
31 
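/* When a large bucket array cannot be allocated in one piece (typically a
 * GFP_ATOMIC resize on a fragmented system), the buckets are spread over a
 * small tree of page-sized nested tables: interior pages hold pointers to
 * further nested tables, leaf pages hold the bucket heads.  buckets[0] of
 * the struct bucket_table then points at the top-level page and tbl->nest
 * records how many hash bits that top level consumes.
 */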
32 union nested_table {
33 	union nested_table __rcu *table;
34 	struct rhash_lock_head __rcu *bucket;
35 };
36 
37 static u32 head_hashfn(struct rhashtable *ht,
38 		       const struct bucket_table *tbl,
39 		       const struct rhash_head *he)
40 {
41 	return rht_head_hashfn(ht, tbl, he, ht->p);
42 }
43 
44 #ifdef CONFIG_PROVE_LOCKING
45 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
46 
47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
48 {
49 	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
50 }
51 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
52 
53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
54 {
55 	if (!debug_locks)
56 		return 1;
57 	if (unlikely(tbl->nest))
58 		return 1;
59 	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
62 #else
63 #define ASSERT_RHT_MUTEX(HT)
64 #endif
65 
66 static inline union nested_table *nested_table_top(
67 	const struct bucket_table *tbl)
68 {
69 	/* The top-level bucket entry does not need RCU protection
70 	 * because it's set at the same time as tbl->nest.
71 	 */
72 	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
73 }
74 
75 static void nested_table_free(union nested_table *ntbl, unsigned int size)
76 {
77 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
78 	const unsigned int len = 1 << shift;
79 	unsigned int i;
80 
81 	ntbl = rcu_dereference_protected(ntbl->table, 1);
82 	if (!ntbl)
83 		return;
84 
85 	if (size > len) {
86 		size >>= shift;
87 		for (i = 0; i < len; i++)
88 			nested_table_free(ntbl + i, size);
89 	}
90 
91 	kfree(ntbl);
92 }
93 
94 static void nested_bucket_table_free(const struct bucket_table *tbl)
95 {
96 	unsigned int size = tbl->size >> tbl->nest;
97 	unsigned int len = 1 << tbl->nest;
98 	union nested_table *ntbl;
99 	unsigned int i;
100 
101 	ntbl = nested_table_top(tbl);
102 
103 	for (i = 0; i < len; i++)
104 		nested_table_free(ntbl + i, size);
105 
106 	kfree(ntbl);
107 }
108 
109 static void bucket_table_free(const struct bucket_table *tbl)
110 {
111 	if (tbl->nest)
112 		nested_bucket_table_free(tbl);
113 
114 	kvfree(tbl);
115 }
116 
117 static void bucket_table_free_rcu(struct rcu_head *head)
118 {
119 	bucket_table_free(container_of(head, struct bucket_table, rcu));
120 }
121 
122 static union nested_table *nested_table_alloc(struct rhashtable *ht,
123 					      union nested_table __rcu **prev,
124 					      bool leaf)
125 {
126 	union nested_table *ntbl;
127 	int i;
128 
129 	ntbl = rcu_dereference(*prev);
130 	if (ntbl)
131 		return ntbl;
132 
133 	ntbl = alloc_hooks_tag(ht->alloc_tag,
134 			kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
135 
136 	if (ntbl && leaf) {
137 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
138 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
139 	}
140 
141 	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
142 		return ntbl;
143 	/* Raced with another thread. */
144 	kfree(ntbl);
145 	return rcu_dereference(*prev);
146 }
147 
148 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
149 						      size_t nbuckets,
150 						      gfp_t gfp)
151 {
152 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
153 	struct bucket_table *tbl;
154 	size_t size;
155 
156 	if (nbuckets < (1 << (shift + 1)))
157 		return NULL;
158 
159 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
160 
161 	tbl = alloc_hooks_tag(ht->alloc_tag,
162 			kmalloc_noprof(size, gfp|__GFP_ZERO));
163 	if (!tbl)
164 		return NULL;
165 
166 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
167 				false)) {
168 		kfree(tbl);
169 		return NULL;
170 	}
171 
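	/* Let the top level absorb the "leftover" hash bits so that every
	 * deeper level can consume exactly 'shift' bits.  For example, with
	 * 4K pages on 64-bit (shift == 9) and nbuckets == 1024, nest is 1
	 * and the single leaf level below it covers the remaining 9 bits.
	 */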
172 	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
173 
174 	return tbl;
175 }
176 
177 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
178 					       size_t nbuckets,
179 					       gfp_t gfp)
180 {
181 	struct bucket_table *tbl = NULL;
182 	size_t size;
183 	int i;
184 	static struct lock_class_key __key;
185 
186 	tbl = alloc_hooks_tag(ht->alloc_tag,
187 			kvmalloc_node_align_noprof(struct_size(tbl, buckets, nbuckets),
188 					     1, gfp|__GFP_ZERO, NUMA_NO_NODE));
189 
190 	size = nbuckets;
191 
192 	if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
193 		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
194 		nbuckets = 0;
195 	}
196 
197 	if (tbl == NULL)
198 		return NULL;
199 
200 	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
201 
202 	tbl->size = size;
203 
204 	rcu_head_init(&tbl->rcu);
205 	INIT_LIST_HEAD(&tbl->walkers);
206 
207 	tbl->hash_rnd = get_random_u32();
208 
209 	for (i = 0; i < nbuckets; i++)
210 		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
211 
212 	return tbl;
213 }
214 
215 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
216 						  struct bucket_table *tbl)
217 {
218 	struct bucket_table *new_tbl;
219 
220 	do {
221 		new_tbl = tbl;
222 		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
223 	} while (tbl);
224 
225 	return new_tbl;
226 }
227 
228 static int rhashtable_rehash_one(struct rhashtable *ht,
229 				 struct rhash_lock_head __rcu **bkt,
230 				 unsigned int old_hash)
231 {
232 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
233 	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
234 	int err = -EAGAIN;
235 	struct rhash_head *head, *next, *entry;
236 	struct rhash_head __rcu **pprev = NULL;
237 	unsigned int new_hash;
238 	unsigned long flags;
239 
240 	if (new_tbl->nest)
241 		goto out;
242 
243 	err = -ENOENT;
244 
245 	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
246 			  old_tbl, old_hash) {
247 		err = 0;
248 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
249 
250 		if (rht_is_a_nulls(next))
251 			break;
252 
253 		pprev = &entry->next;
254 	}
255 
256 	if (err)
257 		goto out;
258 
259 	new_hash = head_hashfn(ht, new_tbl, entry);
260 
261 	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
262 				SINGLE_DEPTH_NESTING);
263 
264 	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
265 
266 	RCU_INIT_POINTER(entry->next, head);
267 
268 	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
269 
270 	if (pprev)
271 		rcu_assign_pointer(*pprev, next);
272 	else
273 		/* Need to preserve the bit lock. */
274 		rht_assign_locked(bkt, next);
275 
276 out:
277 	return err;
278 }
279 
280 static int rhashtable_rehash_chain(struct rhashtable *ht,
281 				    unsigned int old_hash)
282 {
283 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
284 	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
285 	unsigned long flags;
286 	int err;
287 
288 	if (!bkt)
289 		return 0;
290 	flags = rht_lock(old_tbl, bkt);
291 
292 	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
293 		;
294 
295 	if (err == -ENOENT)
296 		err = 0;
297 	rht_unlock(old_tbl, bkt, flags);
298 
299 	return err;
300 }
301 
302 static int rhashtable_rehash_attach(struct rhashtable *ht,
303 				    struct bucket_table *old_tbl,
304 				    struct bucket_table *new_tbl)
305 {
306 	/* Make insertions go into the new, empty table right away. Deletions
307 	 * and lookups will be attempted in both tables until we synchronize.
308 	 * As cmpxchg() provides strong barriers, we do not need
309 	 * rcu_assign_pointer().
310 	 */
311 
312 	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
313 		    new_tbl) != NULL)
314 		return -EEXIST;
315 
316 	return 0;
317 }
318 
319 static int rhashtable_rehash_table(struct rhashtable *ht)
320 {
321 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
322 	struct bucket_table *new_tbl;
323 	struct rhashtable_walker *walker;
324 	unsigned int old_hash;
325 	int err;
326 
327 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
328 	if (!new_tbl)
329 		return 0;
330 
331 	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
332 		err = rhashtable_rehash_chain(ht, old_hash);
333 		if (err)
334 			return err;
335 		cond_resched();
336 	}
337 
338 	/* Publish the new table pointer. */
339 	rcu_assign_pointer(ht->tbl, new_tbl);
340 
341 	spin_lock(&ht->lock);
342 	list_for_each_entry(walker, &old_tbl->walkers, list)
343 		walker->tbl = NULL;
344 
345 	/* Wait for readers. All new readers will see the new
346 	 * table, and thus no references to the old table will
347 	 * remain.
348 	 * We do this inside the locked region so that
349 	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
350 	 * to check if it should not re-link the table.
351 	 */
352 	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
353 	spin_unlock(&ht->lock);
354 
355 	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
356 }
357 
358 static int rhashtable_rehash_alloc(struct rhashtable *ht,
359 				   struct bucket_table *old_tbl,
360 				   unsigned int size)
361 	__must_hold(&ht->mutex)
362 {
363 	struct bucket_table *new_tbl;
364 	int err;
365 
366 	ASSERT_RHT_MUTEX(ht);
367 
368 	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
369 	if (new_tbl == NULL)
370 		return -ENOMEM;
371 
372 	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
373 	if (err)
374 		bucket_table_free(new_tbl);
375 
376 	return err;
377 }
378 
379 /**
380  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
381  * @ht:		the hash table to shrink
382  *
383  * This function shrinks the hash table to fit, i.e., to the smallest
384  * size that would not cause it to expand right away automatically.
385  *
386  * The caller must ensure that no concurrent resizing occurs by holding
387  * ht->mutex.
388  *
389  * The caller must ensure that no concurrent table mutations take place.
390  * It is however valid to have concurrent lookups if they are RCU protected.
391  *
392  * It is valid to have concurrent insertions and deletions protected by per
393  * bucket locks or concurrent RCU protected lookups and traversals.
394  */
395 static int rhashtable_shrink(struct rhashtable *ht)
396 	__must_hold(&ht->mutex)
397 {
398 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
399 	unsigned int nelems = atomic_read(&ht->nelems);
400 	unsigned int size = 0;
401 
402 	if (nelems)
403 		size = roundup_pow_of_two(nelems * 3 / 2);
404 	if (size < ht->p.min_size)
405 		size = ht->p.min_size;
406 
407 	if (old_tbl->size <= size)
408 		return 0;
409 
410 	if (rht_dereference(old_tbl->future_tbl, ht))
411 		return -EEXIST;
412 
413 	return rhashtable_rehash_alloc(ht, old_tbl, size);
414 }
415 
416 static void rht_deferred_worker(struct work_struct *work)
417 {
418 	struct rhashtable *ht;
419 	struct bucket_table *tbl;
420 	int err = 0;
421 
422 	ht = container_of(work, struct rhashtable, run_work);
423 	mutex_lock(&ht->mutex);
424 
425 	tbl = rht_dereference(ht->tbl, ht);
426 	tbl = rhashtable_last_table(ht, tbl);
427 
428 	if (rht_grow_above_75(ht, tbl))
429 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
430 	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
431 		err = rhashtable_shrink(ht);
432 	else if (tbl->nest)
433 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
434 
435 	if (!err || err == -EEXIST) {
436 		int nerr;
437 
438 		nerr = rhashtable_rehash_table(ht);
439 		err = err ?: nerr;
440 	}
441 
442 	mutex_unlock(&ht->mutex);
443 
444 	/*
445 	 * Re-arm via @run_work, not @run_irq_work.
446 	 * rhashtable_free_and_destroy() drains async work with irq_work_sync()
447 	 * followed by cancel_work_sync(). If this site queued irq_work while
448 	 * cancel_work_sync() was waiting for us, irq_work_sync() would already
449 	 * have returned and the stale irq_work could fire post-teardown.
450 	 * cancel_work_sync() natively handles self-requeue on @run_work.
451 	 */
452 	if (err)
453 		schedule_work(&ht->run_work);
454 }
455 
456 /*
457  * Insert-path callers can run under a raw spinlock (e.g. an insecure_elasticity
458  * user). Calling schedule_work() under that lock records caller_lock ->
459  * pool->lock -> pi_lock -> rq->__lock, closing a locking cycle if any of
460  * these is acquired in the reverse direction elsewhere. Bounce through
461  * irq_work so the schedule_work() runs with the caller's lock no longer held.
462  */
463 static void rht_deferred_irq_work(struct irq_work *irq_work)
464 {
465 	struct rhashtable *ht = container_of(irq_work, struct rhashtable,
466 					     run_irq_work);
467 
468 	schedule_work(&ht->run_work);
469 }
470 
471 static int rhashtable_insert_rehash(struct rhashtable *ht,
472 				    struct bucket_table *tbl)
473 {
474 	struct bucket_table *old_tbl;
475 	struct bucket_table *new_tbl;
476 	unsigned int size;
477 	int err;
478 
479 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
480 
481 	size = tbl->size;
482 
483 	err = -EBUSY;
484 
485 	if (rht_grow_above_75(ht, tbl))
486 		size *= 2;
487 	/* Do not schedule more than one rehash */
488 	else if (old_tbl != tbl)
489 		goto fail;
490 
491 	err = -ENOMEM;
492 
493 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
494 	if (new_tbl == NULL)
495 		goto fail;
496 
497 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
498 	if (err) {
499 		bucket_table_free(new_tbl);
500 		if (err == -EEXIST)
501 			err = 0;
502 	} else
503 		irq_work_queue(&ht->run_irq_work);
504 
505 	return err;
506 
507 fail:
508 	/* Do not fail the insert if someone else did a rehash. */
509 	if (likely(rcu_access_pointer(tbl->future_tbl)))
510 		return 0;
511 
512 	/* Schedule async rehash to retry allocation in process context. */
513 	if (err == -ENOMEM)
514 		irq_work_queue(&ht->run_irq_work);
515 
516 	return err;
517 }
518 
519 static void *rhashtable_lookup_one(struct rhashtable *ht,
520 				   struct rhash_lock_head __rcu **bkt,
521 				   struct bucket_table *tbl, unsigned int hash,
522 				   const void *key, struct rhash_head *obj)
523 {
524 	struct rhashtable_compare_arg arg = {
525 		.ht = ht,
526 		.key = key,
527 	};
528 	struct rhash_head __rcu **pprev = NULL;
529 	struct rhash_head *head;
530 	int elasticity;
531 
532 	elasticity = RHT_ELASTICITY;
533 	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
534 		struct rhlist_head *list;
535 		struct rhlist_head *plist;
536 
537 		elasticity--;
538 		if (!key ||
539 		    (ht->p.obj_cmpfn ?
540 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
541 		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
542 			pprev = &head->next;
543 			continue;
544 		}
545 
546 		if (!ht->rhlist)
547 			return rht_obj(ht, head);
548 
549 		list = container_of(obj, struct rhlist_head, rhead);
550 		plist = container_of(head, struct rhlist_head, rhead);
551 
552 		RCU_INIT_POINTER(list->next, plist);
553 		head = rht_dereference_bucket(head->next, tbl, hash);
554 		RCU_INIT_POINTER(list->rhead.next, head);
555 		if (pprev)
556 			rcu_assign_pointer(*pprev, obj);
557 		else
558 			/* Need to preserve the bit lock */
559 			rht_assign_locked(bkt, obj);
560 
561 		return NULL;
562 	}
563 
564 	if (elasticity <= 0 && !ht->p.insecure_elasticity)
565 		return ERR_PTR(-EAGAIN);
566 
567 	return ERR_PTR(-ENOENT);
568 }
569 
570 static struct bucket_table *rhashtable_insert_one(
571 	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
572 	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
573 	void *data)
574 {
575 	struct bucket_table *new_tbl;
576 	struct rhash_head *head;
577 
578 	if (!IS_ERR_OR_NULL(data))
579 		return ERR_PTR(-EEXIST);
580 
581 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
582 		return ERR_CAST(data);
583 
584 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
585 	if (new_tbl)
586 		return new_tbl;
587 
588 	if (PTR_ERR(data) != -ENOENT)
589 		return ERR_CAST(data);
590 
591 	if (unlikely(rht_grow_above_max(ht, tbl)))
592 		return ERR_PTR(-E2BIG);
593 
594 	if (unlikely(rht_grow_above_100(ht, tbl)) &&
595 	    !ht->p.insecure_elasticity)
596 		return ERR_PTR(-EAGAIN);
597 
598 	head = rht_ptr(bkt, tbl, hash);
599 
600 	RCU_INIT_POINTER(obj->next, head);
601 	if (ht->rhlist) {
602 		struct rhlist_head *list;
603 
604 		list = container_of(obj, struct rhlist_head, rhead);
605 		RCU_INIT_POINTER(list->next, NULL);
606 	}
607 
608 	/* bkt is always the head of the list, so it holds
609 	 * the lock, which we need to preserve
610 	 */
611 	rht_assign_locked(bkt, obj);
612 
613 	return NULL;
614 }
615 
616 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
617 				   struct rhash_head *obj)
618 {
619 	struct bucket_table *new_tbl;
620 	struct bucket_table *tbl;
621 	struct rhash_lock_head __rcu **bkt;
622 	unsigned long flags;
623 	unsigned int hash;
624 	void *data;
625 
626 	new_tbl = rcu_dereference(ht->tbl);
627 
628 	do {
629 		tbl = new_tbl;
630 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
631 		if (rcu_access_pointer(tbl->future_tbl))
632 			/* Failure is OK */
633 			bkt = rht_bucket_var(tbl, hash);
634 		else
635 			bkt = rht_bucket_insert(ht, tbl, hash);
636 		if (bkt == NULL) {
637 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
638 			data = ERR_PTR(-EAGAIN);
639 		} else {
640 			bool inserted;
641 
642 			flags = rht_lock(tbl, bkt);
643 			data = rhashtable_lookup_one(ht, bkt, tbl,
644 						     hash, key, obj);
645 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
646 							hash, obj, data);
647 			inserted = data && !new_tbl;
648 			if (inserted)
649 				atomic_inc(&ht->nelems);
650 			if (PTR_ERR(new_tbl) != -EEXIST)
651 				data = ERR_CAST(new_tbl);
652 
653 			rht_unlock(tbl, bkt, flags);
654 
655 			if (inserted && rht_grow_above_75(ht, tbl))
656 				irq_work_queue(&ht->run_irq_work);
657 		}
658 	} while (!IS_ERR_OR_NULL(new_tbl));
659 
660 	if (PTR_ERR(data) == -EAGAIN)
661 		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
662 			       -EAGAIN);
663 
664 	return data;
665 }
666 
667 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
668 			     struct rhash_head *obj)
669 {
670 	void *data;
671 
672 	do {
673 		rcu_read_lock();
674 		data = rhashtable_try_insert(ht, key, obj);
675 		rcu_read_unlock();
676 	} while (PTR_ERR(data) == -EAGAIN);
677 
678 	return data;
679 }
680 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
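/* Callers normally insert through the inline fast paths in
 * <linux/rhashtable.h>, which fall back to this slow path when they cannot
 * complete the insertion themselves (e.g. while a rehash is in progress).
 * A minimal sketch of a typical caller; struct test_obj and test_params
 * are illustrative only:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset  = offsetof(struct test_obj, key),
 *		.key_len     = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 */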
681 
682 /**
683  * rhashtable_walk_enter - Initialise an iterator
684  * @ht:		Table to walk over
685  * @iter:	Hash table Iterator
686  *
687  * This function prepares a hash table walk.
688  *
689  * Note that if you restart a walk after rhashtable_walk_stop you
690  * may see the same object twice.  Also, you may miss objects if
691  * there are removals in between rhashtable_walk_stop and the next
692  * call to rhashtable_walk_start.
693  *
694  * For a completely stable walk you should construct your own data
695  * structure outside the hash table.
696  *
697  * This function may be called from any process context, including
698  * non-preemptible context, but cannot be called from softirq or
699  * hardirq context.
700  *
701  * You must call rhashtable_walk_exit after this function returns.
702  */
703 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
704 {
705 	iter->ht = ht;
706 	iter->p = NULL;
707 	iter->slot = 0;
708 	iter->skip = 0;
709 	iter->end_of_table = 0;
710 
711 	spin_lock(&ht->lock);
712 	iter->walker.tbl =
713 		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
714 	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
715 	spin_unlock(&ht->lock);
716 }
717 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
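/* A minimal walk sketch using the iterator API defined here; the element
 * type and visit() are illustrative only.  Sleeping is not allowed between
 * rhashtable_walk_start() and rhashtable_walk_stop() because the walk runs
 * under the RCU read lock:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		visit(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */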
718 
719 /**
720  * rhashtable_walk_exit - Free an iterator
721  * @iter:	Hash table Iterator
722  *
723  * This function frees resources allocated by rhashtable_walk_enter.
724  */
725 void rhashtable_walk_exit(struct rhashtable_iter *iter)
726 {
727 	spin_lock(&iter->ht->lock);
728 	if (iter->walker.tbl)
729 		list_del(&iter->walker.list);
730 	spin_unlock(&iter->ht->lock);
731 }
732 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
733 
734 /**
735  * rhashtable_walk_start_check - Start a hash table walk
736  * @iter:	Hash table iterator
737  *
738  * Start a hash table walk at the current iterator position.  Note that we take
739  * the RCU lock in all cases including when we return an error.  So you must
740  * always call rhashtable_walk_stop to clean up.
741  *
742  * Returns zero if successful.
743  *
744  * Returns -EAGAIN if resize event occurred.  Note that the iterator
745  * will rewind back to the beginning and you may use it immediately
746  * by calling rhashtable_walk_next.
747  *
748  * rhashtable_walk_start is defined as an inline variant that returns
749  * void. This is preferred in cases where the caller would ignore
750  * resize events and always continue.
751  */
752 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
753 	__acquires_shared(RCU)
754 {
755 	struct rhashtable *ht = iter->ht;
756 	bool rhlist = ht->rhlist;
757 
758 	rcu_read_lock();
759 
760 	spin_lock(&ht->lock);
761 	if (iter->walker.tbl)
762 		list_del(&iter->walker.list);
763 	spin_unlock(&ht->lock);
764 
765 	if (iter->end_of_table)
766 		return 0;
767 	if (!iter->walker.tbl) {
768 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
769 		iter->slot = 0;
770 		iter->skip = 0;
771 		return -EAGAIN;
772 	}
773 
774 	if (iter->p && !rhlist) {
775 		/*
776 		 * We need to validate that 'p' is still in the table, and
777 		 * if so, update 'skip'
778 		 */
779 		struct rhash_head *p;
780 		int skip = 0;
781 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
782 			skip++;
783 			if (p == iter->p) {
784 				iter->skip = skip;
785 				goto found;
786 			}
787 		}
788 		iter->p = NULL;
789 	} else if (iter->p && rhlist) {
790 		/* Need to validate that 'list' is still in the table, and
791 		 * if so, update 'skip' and 'p'.
792 		 */
793 		struct rhash_head *p;
794 		struct rhlist_head *list;
795 		int skip = 0;
796 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
797 			for (list = container_of(p, struct rhlist_head, rhead);
798 			     list;
799 			     list = rcu_dereference(list->next)) {
800 				skip++;
801 				if (list == iter->list) {
802 					iter->p = p;
803 					iter->skip = skip;
804 					goto found;
805 				}
806 			}
807 		}
808 		iter->p = NULL;
809 	}
810 found:
811 	return 0;
812 }
813 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
814 
815 /**
816  * __rhashtable_walk_find_next - Find the next element in a table (or the first
817  * one in case of a new walk).
818  *
819  * @iter:	Hash table iterator
820  *
821  * Returns the found object or NULL when the end of the table is reached.
822  *
823  * Returns -EAGAIN if resize event occurred.
824  */
825 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
826 {
827 	struct bucket_table *tbl = iter->walker.tbl;
828 	struct rhlist_head *list = iter->list;
829 	struct rhashtable *ht = iter->ht;
830 	struct rhash_head *p = iter->p;
831 	bool rhlist = ht->rhlist;
832 
833 	if (!tbl)
834 		return NULL;
835 
836 	for (; iter->slot < tbl->size; iter->slot++) {
837 		int skip = iter->skip;
838 
839 		rht_for_each_rcu(p, tbl, iter->slot) {
840 			if (rhlist) {
841 				list = container_of(p, struct rhlist_head,
842 						    rhead);
843 				do {
844 					if (!skip)
845 						goto next;
846 					skip--;
847 					list = rcu_dereference(list->next);
848 				} while (list);
849 
850 				continue;
851 			}
852 			if (!skip)
853 				break;
854 			skip--;
855 		}
856 
857 next:
858 		if (!rht_is_a_nulls(p)) {
859 			iter->skip++;
860 			iter->p = p;
861 			iter->list = list;
862 			return rht_obj(ht, rhlist ? &list->rhead : p);
863 		}
864 
865 		iter->skip = 0;
866 	}
867 
868 	iter->p = NULL;
869 
870 	/* Ensure we see any new tables. */
871 	smp_rmb();
872 
873 	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
874 	if (iter->walker.tbl) {
875 		iter->slot = 0;
876 		iter->skip = 0;
877 		return ERR_PTR(-EAGAIN);
878 	} else {
879 		iter->end_of_table = true;
880 	}
881 
882 	return NULL;
883 }
884 
885 /**
886  * rhashtable_walk_next - Return the next object and advance the iterator
887  * @iter:	Hash table iterator
888  *
889  * Note that you must call rhashtable_walk_stop when you are finished
890  * with the walk.
891  *
892  * Returns the next object or NULL when the end of the table is reached.
893  *
894  * Returns -EAGAIN if resize event occurred.  Note that the iterator
895  * will rewind back to the beginning and you may continue to use it.
896  */
897 void *rhashtable_walk_next(struct rhashtable_iter *iter)
898 {
899 	struct rhlist_head *list = iter->list;
900 	struct rhashtable *ht = iter->ht;
901 	struct rhash_head *p = iter->p;
902 	bool rhlist = ht->rhlist;
903 
904 	if (p) {
905 		if (!rhlist || !(list = rcu_dereference(list->next))) {
906 			p = rcu_dereference(p->next);
907 			list = container_of(p, struct rhlist_head, rhead);
908 		}
909 		if (!rht_is_a_nulls(p)) {
910 			iter->skip++;
911 			iter->p = p;
912 			iter->list = list;
913 			return rht_obj(ht, rhlist ? &list->rhead : p);
914 		}
915 
916 		/* At the end of this slot, switch to next one and then find
917 		 * next entry from that point.
918 		 */
919 		iter->skip = 0;
920 		iter->slot++;
921 	}
922 
923 	return __rhashtable_walk_find_next(iter);
924 }
925 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
926 
927 /**
928  * rhashtable_walk_peek - Return the next object but don't advance the iterator
929  * @iter:	Hash table iterator
930  *
931  * Returns the next object or NULL when the end of the table is reached.
932  *
933  * Returns -EAGAIN if resize event occurred.  Note that the iterator
934  * will rewind back to the beginning and you may continue to use it.
935  */
936 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
937 {
938 	struct rhlist_head *list = iter->list;
939 	struct rhashtable *ht = iter->ht;
940 	struct rhash_head *p = iter->p;
941 
942 	if (p)
943 		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
944 
945 	/* No object found in current iter, find next one in the table. */
946 
947 	if (iter->skip) {
948 		/* A nonzero skip value points to the next entry in the table
949 		 * beyond the last one that was found. Decrement skip so
950 		 * we find the current value. __rhashtable_walk_find_next
951 		 * will restore the original value of skip assuming that
952 		 * the table hasn't changed.
953 		 */
954 		iter->skip--;
955 	}
956 
957 	return __rhashtable_walk_find_next(iter);
958 }
959 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
960 
961 /**
962  * rhashtable_walk_stop - Finish a hash table walk
963  * @iter:	Hash table iterator
964  *
965  * Finish a hash table walk.  Does not reset the iterator to the start of the
966  * hash table.
967  */
968 void rhashtable_walk_stop(struct rhashtable_iter *iter)
969 {
970 	struct rhashtable *ht;
971 	struct bucket_table *tbl = iter->walker.tbl;
972 
973 	if (!tbl)
974 		goto out;
975 
976 	ht = iter->ht;
977 
978 	spin_lock(&ht->lock);
979 	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
980 		/* This bucket table is being freed, don't re-link it. */
981 		iter->walker.tbl = NULL;
982 	else
983 		list_add(&iter->walker.list, &tbl->walkers);
984 	spin_unlock(&ht->lock);
985 
986 out:
987 	rcu_read_unlock();
988 }
989 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
990 
991 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
992 {
993 	size_t retsize;
994 
995 	if (params->nelem_hint)
996 		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
997 			      (unsigned long)params->min_size);
998 	else
999 		retsize = max(HASH_DEFAULT_SIZE,
1000 			      (unsigned long)params->min_size);
1001 
1002 	return retsize;
1003 }
1004 
1005 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
1006 {
1007 	return jhash2(key, length, seed);
1008 }
1009 
1010 /**
1011  * rhashtable_init - initialize a new hash table
1012  * @ht:		hash table to be initialized
1013  * @params:	configuration parameters
1014  *
1015  * Initializes a new hash table based on the provided configuration
1016  * parameters. A table can be configured either with a variable or
1017  * fixed length key:
1018  *
1019  * Configuration Example 1: Fixed length keys
1020  * struct test_obj {
1021  *	int			key;
1022  *	void *			my_member;
1023  *	struct rhash_head	node;
1024  * };
1025  *
1026  * struct rhashtable_params params = {
1027  *	.head_offset = offsetof(struct test_obj, node),
1028  *	.key_offset = offsetof(struct test_obj, key),
1029  *	.key_len = sizeof(int),
1030  *	.hashfn = jhash,
1031  * };
1032  *
1033  * Configuration Example 2: Variable length keys
1034  * struct test_obj {
1035  *	[...]
1036  *	struct rhash_head	node;
1037  * };
1038  *
1039  * u32 my_hash_fn(const void *data, u32 len, u32 seed)
1040  * {
1041  *	struct test_obj *obj = data;
1042  *
1043  *	return [... hash ...];
1044  * }
1045  *
1046  * struct rhashtable_params params = {
1047  *	.head_offset = offsetof(struct test_obj, node),
1048  *	.hashfn = jhash,
1049  *	.obj_hashfn = my_hash_fn,
1050  * };
1051  */
1052 int rhashtable_init_noprof(struct rhashtable *ht,
1053 		    const struct rhashtable_params *params)
1054 {
1055 	struct bucket_table *tbl;
1056 	size_t size;
1057 
1058 	if ((!params->key_len && !params->obj_hashfn) ||
1059 	    (params->obj_hashfn && !params->obj_cmpfn))
1060 		return -EINVAL;
1061 
1062 	memset(ht, 0, sizeof(*ht));
1063 	mutex_init(&ht->mutex);
1064 	spin_lock_init(&ht->lock);
1065 	memcpy(&ht->p, params, sizeof(*params));
1066 
1067 	alloc_tag_record(ht->alloc_tag);
1068 
1069 	if (params->min_size)
1070 		ht->p.min_size = roundup_pow_of_two(params->min_size);
1071 
1072 	/* Cap total entries at 2^31 to avoid nelems overflow. */
1073 	ht->max_elems = 1u << 31;
1074 
1075 	if (params->max_size) {
1076 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
1077 		if (ht->p.max_size < ht->max_elems / 2)
1078 			ht->max_elems = ht->p.max_size * 2;
1079 	}
1080 
1081 	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1082 
1083 	size = rounded_hashtable_size(&ht->p);
1084 
1085 	ht->key_len = ht->p.key_len;
1086 	if (!params->hashfn) {
1087 		ht->p.hashfn = jhash;
1088 
1089 		if (!(ht->key_len & (sizeof(u32) - 1))) {
1090 			ht->key_len /= sizeof(u32);
1091 			ht->p.hashfn = rhashtable_jhash2;
1092 		}
1093 	}
1094 
1095 	/*
1096 	 * This is API initialization and thus we need to guarantee the
1097 	 * initial rhashtable allocation. Upon failure, retry with the
1098 	 * smallest possible size with __GFP_NOFAIL semantics.
1099 	 */
1100 	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1101 	if (unlikely(tbl == NULL)) {
1102 		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1103 		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1104 	}
1105 
1106 	atomic_set(&ht->nelems, 0);
1107 
1108 	RCU_INIT_POINTER(ht->tbl, tbl);
1109 
1110 	INIT_WORK(&ht->run_work, rht_deferred_worker);
1111 	init_irq_work(&ht->run_irq_work, rht_deferred_irq_work);
1112 
1113 	return 0;
1114 }
1115 EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
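/* A minimal end-to-end sketch using the fixed length key parameters from
 * the example above (declarations and error handling trimmed; names are
 * illustrative):
 *
 *	err = rhashtable_init(&ht, &params);
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, params);
 *
 *	rhashtable_destroy(&ht);
 */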
1116 
1117 /**
1118  * rhltable_init - initialize a new hash list table
1119  * @hlt:	hash list table to be initialized
1120  * @params:	configuration parameters
1121  *
1122  * Initializes a new hash list table.
1123  *
1124  * See documentation for rhashtable_init.
1125  */
1126 int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
1127 {
1128 	int err;
1129 
1130 	err = rhashtable_init_noprof(&hlt->ht, params);
1131 	hlt->ht.rhlist = true;
1132 	return err;
1133 }
1134 EXPORT_SYMBOL_GPL(rhltable_init_noprof);
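/* An rhltable keeps all objects that share a key on one rhlist chain, so
 * duplicate keys are allowed.  A minimal sketch (declarations trimmed;
 * struct members, params and visit() are illustrative):
 *
 *	err = rhltable_init(&hlt, &params);
 *	err = rhltable_insert(&hlt, &obj->list_node, params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		visit(obj);
 *	rcu_read_unlock();
 *
 *	err = rhltable_remove(&hlt, &obj->list_node, params);
 *	rhltable_destroy(&hlt);
 */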
1135 
1136 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1137 				void (*free_fn)(void *ptr, void *arg),
1138 				void *arg)
1139 {
1140 	struct rhlist_head *list;
1141 
1142 	if (!ht->rhlist) {
1143 		free_fn(rht_obj(ht, obj), arg);
1144 		return;
1145 	}
1146 
1147 	list = container_of(obj, struct rhlist_head, rhead);
1148 	do {
1149 		obj = &list->rhead;
1150 		list = rht_dereference(list->next, ht);
1151 		free_fn(rht_obj(ht, obj), arg);
1152 	} while (list);
1153 }
1154 
1155 /**
1156  * rhashtable_free_and_destroy - free elements and destroy hash table
1157  * @ht:		the hash table to destroy
1158  * @free_fn:	callback to release resources of element
1159  * @arg:	pointer passed to free_fn
1160  *
1161  * Stops any pending async resize. If defined, invokes free_fn for each
1162  * element to release its resources. Please note that RCU protected
1163  * readers may still be accessing the elements; resources must therefore
1164  * be released in a compatible manner. Then frees the bucket array.
1165  *
1166  * This function will eventually sleep to wait for an async resize
1167  * to complete. The caller is responsible for ensuring that no further
1168  * write operations occur in parallel.
1169  */
1170 void rhashtable_free_and_destroy(struct rhashtable *ht,
1171 				 void (*free_fn)(void *ptr, void *arg),
1172 				 void *arg)
1173 {
1174 	struct bucket_table *tbl, *next_tbl;
1175 	unsigned int i;
1176 
1177 	irq_work_sync(&ht->run_irq_work);
1178 	cancel_work_sync(&ht->run_work);
1179 
1180 	mutex_lock(&ht->mutex);
1181 	tbl = rht_dereference(ht->tbl, ht);
1182 restart:
1183 	if (free_fn) {
1184 		for (i = 0; i < tbl->size; i++) {
1185 			struct rhash_head *pos, *next;
1186 
1187 			cond_resched();
1188 			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1189 			     next = !rht_is_a_nulls(pos) ?
1190 					rht_dereference(pos->next, ht) : NULL;
1191 			     !rht_is_a_nulls(pos);
1192 			     pos = next,
1193 			     next = !rht_is_a_nulls(pos) ?
1194 					rht_dereference(pos->next, ht) : NULL)
1195 				rhashtable_free_one(ht, pos, free_fn, arg);
1196 		}
1197 	}
1198 
1199 	next_tbl = rht_dereference(tbl->future_tbl, ht);
1200 	bucket_table_free(tbl);
1201 	if (next_tbl) {
1202 		tbl = next_tbl;
1203 		goto restart;
1204 	}
1205 	mutex_unlock(&ht->mutex);
1206 }
1207 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
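/* free_fn runs after the async resize has been stopped, but RCU readers
 * may still hold references to the elements, so freeing must be RCU safe.
 * A minimal sketch; struct test_obj and its rcu member are illustrative:
 *
 *	static void free_test_obj(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		kfree_rcu(obj, rcu);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_test_obj, NULL);
 */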
1208 
1209 void rhashtable_destroy(struct rhashtable *ht)
1210 {
1211 	return rhashtable_free_and_destroy(ht, NULL, NULL);
1212 }
1213 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1214 
1215 struct rhash_lock_head __rcu **__rht_bucket_nested(
1216 	const struct bucket_table *tbl, unsigned int hash)
1217 {
1218 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1219 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1220 	unsigned int size = tbl->size >> tbl->nest;
1221 	unsigned int subhash = hash;
1222 	union nested_table *ntbl;
1223 
1224 	ntbl = nested_table_top(tbl);
1225 	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1226 	subhash >>= tbl->nest;
1227 
1228 	while (ntbl && size > (1 << shift)) {
1229 		index = subhash & ((1 << shift) - 1);
1230 		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1231 						  tbl, hash);
1232 		size >>= shift;
1233 		subhash >>= shift;
1234 	}
1235 
1236 	if (!ntbl)
1237 		return NULL;
1238 
1239 	return &ntbl[subhash].bucket;
1240 
1241 }
1242 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
1243 
1244 struct rhash_lock_head __rcu **rht_bucket_nested(
1245 	const struct bucket_table *tbl, unsigned int hash)
1246 {
1247 	static struct rhash_lock_head __rcu *rhnull;
1248 
1249 	if (!rhnull)
1250 		INIT_RHT_NULLS_HEAD(rhnull);
1251 	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1252 }
1253 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1254 
1255 struct rhash_lock_head __rcu **rht_bucket_nested_insert(
1256 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
1257 {
1258 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1259 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1260 	unsigned int size = tbl->size >> tbl->nest;
1261 	union nested_table *ntbl;
1262 
1263 	ntbl = nested_table_top(tbl);
1264 	hash >>= tbl->nest;
1265 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
1266 				  size <= (1 << shift));
1267 
1268 	while (ntbl && size > (1 << shift)) {
1269 		index = hash & ((1 << shift) - 1);
1270 		size >>= shift;
1271 		hash >>= shift;
1272 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
1273 					  size <= (1 << shift));
1274 	}
1275 
1276 	if (!ntbl)
1277 		return NULL;
1278 
1279 	return &ntbl[hash].bucket;
1280 
1281 }
1282 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1283