rhashtable.c: 408f13ef358aa5ad56dc6230c2c7deb92cf462b1 vs. 4feb7c7a4fbb8f63371be31cda79433c7cf3da86
Lines prefixed with "-" appear only in 408f13ef; lines prefixed with "+" appear only in 4feb7c7a; all other lines are common to both versions.
@@ 408f13ef: line 1 / 4feb7c7a: line 1 @@
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash

--- 183 unchanged lines hidden ---

@@ 408f13ef: line 192 / 4feb7c7a: line 192 @@
        max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

        if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
                                   ht->p.locks_mul, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

+       rcu_head_init(&tbl->rcu);
        INIT_LIST_HEAD(&tbl->walkers);

        tbl->hash_rnd = get_random_u32();

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

        return tbl;

--- 67 unchanged lines hidden ---

@@ 408f13ef: line 275 / 4feb7c7a: line 276 @@
        int err;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!(err = rhashtable_rehash_one(ht, old_hash)))
                ;

-       if (err == -ENOENT) {
-               old_tbl->rehash++;
+       if (err == -ENOENT)
                err = 0;
-       }
+
        spin_unlock_bh(old_bucket_lock);

        return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)

--- 30 unchanged lines hidden ---

@@ 408f13ef: line 325 / 4feb7c7a: line 325 @@
        }

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
-       spin_unlock(&ht->lock);

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
+        * We do this inside the locked region so that
+        * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
+        * to check if it should not re-link the table.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+       spin_unlock(&ht->lock);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
                                   struct bucket_table *old_tbl,
                                   unsigned int size)
{

--- 63 unchanged lines hidden ---

@@ 408f13ef: line 411 / 4feb7c7a: line 414 @@
        if (rht_grow_above_75(ht, tbl))
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                err = rhashtable_shrink(ht);
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

-       if (!err || err == -EEXIST) {
-               int nerr;
-
-               nerr = rhashtable_rehash_table(ht);
-               err = err ?: nerr;
-       }
+       if (!err)
+               err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
                                    struct bucket_table *tbl)

--- 143 unchanged lines hidden ---

@@ 408f13ef: line 577 / 4feb7c7a: line 576 @@
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                                   struct rhash_head *obj)
{
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int hash;
-       spinlock_t *lock;
        void *data;

-       tbl = rcu_dereference(ht->tbl);
+       new_tbl = rcu_dereference(ht->tbl);

-       /* All insertions must grab the oldest table containing
-        * the hashed bucket that is yet to be rehashed.
-        */
-       for (;;) {
-               hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-               lock = rht_bucket_lock(tbl, hash);
-               spin_lock_bh(lock);
-
-               if (tbl->rehash <= hash)
-                       break;
-
-               spin_unlock_bh(lock);
-               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-       }
-
-       data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
-       new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
-       if (PTR_ERR(new_tbl) != -EEXIST)
-               data = ERR_CAST(new_tbl);
-
-       while (!IS_ERR_OR_NULL(new_tbl)) {
+       do {
                tbl = new_tbl;
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-               spin_lock_nested(rht_bucket_lock(tbl, hash),
-                                SINGLE_DEPTH_NESTING);
+               spin_lock_bh(rht_bucket_lock(tbl, hash));

                data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
                new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
                if (PTR_ERR(new_tbl) != -EEXIST)
                        data = ERR_CAST(new_tbl);

-               spin_unlock(rht_bucket_lock(tbl, hash));
-       }
-
-       spin_unlock_bh(lock);
+               spin_unlock_bh(rht_bucket_lock(tbl, hash));
+       } while (!IS_ERR_OR_NULL(new_tbl));

        if (PTR_ERR(data) == -EAGAIN)
                data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
                               -EAGAIN);

        return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,

--- 304 unchanged lines hidden ---

@@ 408f13ef: line 938 / 4feb7c7a: line 913 @@
        struct bucket_table *tbl = iter->walker.tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
-       if (tbl->rehash < tbl->size)
-               list_add(&iter->walker.list, &tbl->walkers);
-       else
+       if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
+               /* This bucket table is being freed, don't re-link it. */
                iter->walker.tbl = NULL;
+       else
+               list_add(&iter->walker.list, &tbl->walkers);
        spin_unlock(&ht->lock);

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)

--- 286 unchanged lines hidden ---
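For readers unfamiliar with the helpers the 4feb7c7a side leans on: rcu_head_init() and rcu_head_after_call_rcu() come from <linux/rcupdate.h> and let code that still holds a reference detect, under a lock, whether call_rcu() has already been invoked on an object. Below is a minimal, self-contained sketch of that pattern; the demo_* names and demo_lock are hypothetical stand-ins for struct bucket_table and ht->lock, not code from rhashtable.c.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object standing in for struct bucket_table. */
struct demo_table {
        struct rcu_head rcu;
        /* ... payload ... */
};

/* Stand-in for ht->lock: serializes retiring against the dying check. */
static DEFINE_SPINLOCK(demo_lock);

static void demo_table_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_table, rcu));
}

static struct demo_table *demo_table_alloc(gfp_t gfp)
{
        struct demo_table *tbl = kzalloc(sizeof(*tbl), gfp);

        if (tbl)
                rcu_head_init(&tbl->rcu);  /* records "call_rcu() not yet invoked" */
        return tbl;
}

/* Retire side: queue the RCU free while holding the lock, so anyone
 * else holding the same lock gets a consistent answer from
 * rcu_head_after_call_rcu().
 */
static void demo_table_retire(struct demo_table *tbl)
{
        spin_lock(&demo_lock);
        call_rcu(&tbl->rcu, demo_table_free_rcu);
        spin_unlock(&demo_lock);
}

/* Check side: caller holds demo_lock and something (e.g. an RCU
 * read-side critical section) still pins tbl; returns true if tbl is
 * already queued for freeing and must not be re-linked anywhere.
 */
static bool demo_table_is_dying(struct demo_table *tbl)
{
        return rcu_head_after_call_rcu(&tbl->rcu, demo_table_free_rcu);
}

In the diff above, the 192/192 hunk plays the allocation role, the 325/325 hunk retires the old table with call_rcu() inside the locked region, and the rhashtable_walk_stop() hunk performs the dying check before deciding whether to re-link the walker.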