Lines matching full:ht (lib/rhashtable.c)

37 static u32 head_hashfn(struct rhashtable *ht,  in head_hashfn()  argument
41 return rht_head_hashfn(ht, tbl, he, ht->p); in head_hashfn()
45 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) argument
47 int lockdep_rht_mutex_is_held(struct rhashtable *ht) in lockdep_rht_mutex_is_held() argument
49 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; in lockdep_rht_mutex_is_held()
63 #define ASSERT_RHT_MUTEX(HT) argument
122 static union nested_table *nested_table_alloc(struct rhashtable *ht, in nested_table_alloc() argument
133 ntbl = alloc_hooks_tag(ht->alloc_tag, in nested_table_alloc()
148 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, in nested_bucket_table_alloc() argument
161 tbl = alloc_hooks_tag(ht->alloc_tag, in nested_bucket_table_alloc()
166 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, in nested_bucket_table_alloc()
177 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, in bucket_table_alloc() argument
186 tbl = alloc_hooks_tag(ht->alloc_tag, in bucket_table_alloc()
193 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); in bucket_table_alloc()
215 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, in rhashtable_last_table() argument
222 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_last_table()
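
rhashtable_last_table() appears here only through its two matching lines; the whole helper just chases the future_tbl chain to the newest table. A from-memory sketch of the upstream body (treat as a sketch, verify against the tree):

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;  /* remember the last non-NULL table */
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}
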
228 static int rhashtable_rehash_one(struct rhashtable *ht, in rhashtable_rehash_one() argument
232 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_one()
233 struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); in rhashtable_rehash_one()
259 new_hash = head_hashfn(ht, new_tbl, entry); in rhashtable_rehash_one()
280 static int rhashtable_rehash_chain(struct rhashtable *ht, in rhashtable_rehash_chain() argument
283 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_chain()
292 while (!(err = rhashtable_rehash_one(ht, bkt, old_hash))) in rhashtable_rehash_chain()
302 static int rhashtable_rehash_attach(struct rhashtable *ht, in rhashtable_rehash_attach() argument
319 static int rhashtable_rehash_table(struct rhashtable *ht) in rhashtable_rehash_table() argument
321 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_table()
327 new_tbl = rht_dereference(old_tbl->future_tbl, ht); in rhashtable_rehash_table()
332 err = rhashtable_rehash_chain(ht, old_hash); in rhashtable_rehash_table()
339 rcu_assign_pointer(ht->tbl, new_tbl); in rhashtable_rehash_table()
341 spin_lock(&ht->lock); in rhashtable_rehash_table()
353 spin_unlock(&ht->lock); in rhashtable_rehash_table()
355 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; in rhashtable_rehash_table()
358 static int rhashtable_rehash_alloc(struct rhashtable *ht, in rhashtable_rehash_alloc() argument
365 ASSERT_RHT_MUTEX(ht); in rhashtable_rehash_alloc()
367 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); in rhashtable_rehash_alloc()
371 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); in rhashtable_rehash_alloc()
380 * @ht: the hash table to shrink
386 * ht->mutex.
394 static int rhashtable_shrink(struct rhashtable *ht) in rhashtable_shrink() argument
396 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_shrink()
397 unsigned int nelems = atomic_read(&ht->nelems); in rhashtable_shrink()
402 if (size < ht->p.min_size) in rhashtable_shrink()
403 size = ht->p.min_size; in rhashtable_shrink()
408 if (rht_dereference(old_tbl->future_tbl, ht)) in rhashtable_shrink()
411 return rhashtable_rehash_alloc(ht, old_tbl, size); in rhashtable_shrink()
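
rhashtable_shrink() picks its target size from the element count before applying the min_size clamp visible above at lines 402-403. A paraphrase of that sizing step (the helper name is hypothetical, not from this file):

/* Aim for roughly 2/3 load after shrinking: size the table at 150%
 * of nelems, rounded up to a power of two, never below the minimum.
 */
static unsigned int shrink_target(unsigned int nelems, unsigned int min_size)
{
        unsigned int size = nelems ? roundup_pow_of_two(nelems * 3 / 2) : 0;

        return size < min_size ? min_size : size;
}
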
416 struct rhashtable *ht; in rht_deferred_worker() local
420 ht = container_of(work, struct rhashtable, run_work); in rht_deferred_worker()
421 mutex_lock(&ht->mutex); in rht_deferred_worker()
423 tbl = rht_dereference(ht->tbl, ht); in rht_deferred_worker()
424 tbl = rhashtable_last_table(ht, tbl); in rht_deferred_worker()
426 if (rht_grow_above_75(ht, tbl)) in rht_deferred_worker()
427 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); in rht_deferred_worker()
428 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) in rht_deferred_worker()
429 err = rhashtable_shrink(ht); in rht_deferred_worker()
431 err = rhashtable_rehash_alloc(ht, tbl, tbl->size); in rht_deferred_worker()
436 nerr = rhashtable_rehash_table(ht); in rht_deferred_worker()
440 mutex_unlock(&ht->mutex); in rht_deferred_worker()
443 schedule_work(&ht->run_work); in rht_deferred_worker()
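
rht_deferred_worker() grows the table at more than 75% load and, with automatic_shrinking set, shrinks it below 30%. A pure-arithmetic paraphrase of those checks (helper names hypothetical; the real rht_grow_above_75()/rht_shrink_below_30() additionally honour p.max_size):

/* Grow once the table is more than three-quarters full. */
static bool would_grow(unsigned int nelems, unsigned int size)
{
        return nelems > size / 4 * 3;
}

/* Shrink once below 30% load, but never under the minimum size. */
static bool would_shrink(unsigned int nelems, unsigned int size,
                         unsigned int min_size)
{
        return nelems < size * 3 / 10 && size > min_size;
}
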
446 static int rhashtable_insert_rehash(struct rhashtable *ht, in rhashtable_insert_rehash() argument
454 old_tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_insert_rehash()
460 if (rht_grow_above_75(ht, tbl)) in rhashtable_insert_rehash()
468 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN); in rhashtable_insert_rehash()
472 err = rhashtable_rehash_attach(ht, tbl, new_tbl); in rhashtable_insert_rehash()
478 schedule_work(&ht->run_work); in rhashtable_insert_rehash()
489 schedule_work(&ht->run_work); in rhashtable_insert_rehash()
494 static void *rhashtable_lookup_one(struct rhashtable *ht, in rhashtable_lookup_one() argument
500 .ht = ht, in rhashtable_lookup_one()
514 (ht->p.obj_cmpfn ? in rhashtable_lookup_one()
515 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : in rhashtable_lookup_one()
516 rhashtable_compare(&arg, rht_obj(ht, head)))) { in rhashtable_lookup_one()
521 if (!ht->rhlist) in rhashtable_lookup_one()
522 return rht_obj(ht, head); in rhashtable_lookup_one()
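
rhashtable_lookup_one() is the internal half of the lookup path; callers normally go through the inline rhashtable_lookup_fast(). A minimal caller-side sketch, where struct test_obj and test_params are hypothetical, not taken from this file:

#include <linux/rhashtable.h>

struct test_obj {
        int key;
        struct rhash_head node;         /* table linkage */
};

static const struct rhashtable_params test_params = {
        .key_len     = sizeof(int),
        .key_offset  = offsetof(struct test_obj, key),
        .head_offset = offsetof(struct test_obj, node),
};

static struct test_obj *find_obj(struct rhashtable *ht, int key)
{
        /* Takes and releases the RCU read lock internally. */
        return rhashtable_lookup_fast(ht, &key, test_params);
}
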
546 struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, in rhashtable_insert_one() argument
559 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_insert_one()
566 if (unlikely(rht_grow_above_max(ht, tbl))) in rhashtable_insert_one()
569 if (unlikely(rht_grow_above_100(ht, tbl))) in rhashtable_insert_one()
575 if (ht->rhlist) { in rhashtable_insert_one()
587 atomic_inc(&ht->nelems); in rhashtable_insert_one()
588 if (rht_grow_above_75(ht, tbl)) in rhashtable_insert_one()
589 schedule_work(&ht->run_work); in rhashtable_insert_one()
594 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, in rhashtable_try_insert() argument
604 new_tbl = rcu_dereference(ht->tbl); in rhashtable_try_insert()
608 hash = rht_head_hashfn(ht, tbl, obj, ht->p); in rhashtable_try_insert()
613 bkt = rht_bucket_insert(ht, tbl, hash); in rhashtable_try_insert()
615 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_try_insert()
619 data = rhashtable_lookup_one(ht, bkt, tbl, in rhashtable_try_insert()
621 new_tbl = rhashtable_insert_one(ht, bkt, tbl, in rhashtable_try_insert()
631 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: in rhashtable_try_insert()
637 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, in rhashtable_insert_slow() argument
644 data = rhashtable_try_insert(ht, key, obj); in rhashtable_insert_slow()
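
rhashtable_insert_slow() is only the fallback: the usual entry point is the inline fast path, which drops into the slow path on a concurrent resize or when a nested bucket must be allocated. Reusing the hypothetical test_obj/test_params above:

static int add_obj(struct rhashtable *ht, struct test_obj *obj)
{
        /* 0 on success, -EEXIST for a duplicate key, or a -errno;
         * retries via rhashtable_insert_slow() when needed. */
        return rhashtable_insert_fast(ht, &obj->node, test_params);
}
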
654 * @ht: Table to walk over
673 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) in rhashtable_walk_enter() argument
675 iter->ht = ht; in rhashtable_walk_enter()
681 spin_lock(&ht->lock); in rhashtable_walk_enter()
683 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); in rhashtable_walk_enter()
685 spin_unlock(&ht->lock); in rhashtable_walk_enter()
697 spin_lock(&iter->ht->lock); in rhashtable_walk_exit()
700 spin_unlock(&iter->ht->lock); in rhashtable_walk_exit()
725 struct rhashtable *ht = iter->ht; in rhashtable_walk_start_check() local
726 bool rhlist = ht->rhlist; in rhashtable_walk_start_check()
730 spin_lock(&ht->lock); in rhashtable_walk_start_check()
733 spin_unlock(&ht->lock); in rhashtable_walk_start_check()
738 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_walk_start_check()
799 struct rhashtable *ht = iter->ht; in __rhashtable_walk_find_next() local
801 bool rhlist = ht->rhlist; in __rhashtable_walk_find_next()
832 return rht_obj(ht, rhlist ? &list->rhead : p); in __rhashtable_walk_find_next()
843 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); in __rhashtable_walk_find_next()
870 struct rhashtable *ht = iter->ht; in rhashtable_walk_next() local
872 bool rhlist = ht->rhlist; in rhashtable_walk_next()
883 return rht_obj(ht, rhlist ? &list->rhead : p); in rhashtable_walk_next()
909 struct rhashtable *ht = iter->ht; in rhashtable_walk_peek() local
913 return rht_obj(ht, ht->rhlist ? &list->rhead : p); in rhashtable_walk_peek()
941 struct rhashtable *ht; in rhashtable_walk_stop() local
947 ht = iter->ht; in rhashtable_walk_stop()
949 spin_lock(&ht->lock); in rhashtable_walk_stop()
955 spin_unlock(&ht->lock); in rhashtable_walk_stop()
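
The walker lines above form the enter/start/next/stop/exit protocol. A typical loop over the hypothetical test_obj table; rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when a resize interrupts the walk:

static void walk_all(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct test_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);   /* enters an RCU read section */

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;   /* table resized; keep going */
                        break;
                }
                /* use obj; no sleeping between start and stop */
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}
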
983 * @ht: hash table to be initialized
1023 int rhashtable_init_noprof(struct rhashtable *ht, in rhashtable_init_noprof() argument
1033 memset(ht, 0, sizeof(*ht)); in rhashtable_init_noprof()
1034 mutex_init(&ht->mutex); in rhashtable_init_noprof()
1035 spin_lock_init(&ht->lock); in rhashtable_init_noprof()
1036 memcpy(&ht->p, params, sizeof(*params)); in rhashtable_init_noprof()
1038 alloc_tag_record(ht->alloc_tag); in rhashtable_init_noprof()
1041 ht->p.min_size = roundup_pow_of_two(params->min_size); in rhashtable_init_noprof()
1044 ht->max_elems = 1u << 31; in rhashtable_init_noprof()
1047 ht->p.max_size = rounddown_pow_of_two(params->max_size); in rhashtable_init_noprof()
1048 if (ht->p.max_size < ht->max_elems / 2) in rhashtable_init_noprof()
1049 ht->max_elems = ht->p.max_size * 2; in rhashtable_init_noprof()
1052 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); in rhashtable_init_noprof()
1054 size = rounded_hashtable_size(&ht->p); in rhashtable_init_noprof()
1056 ht->key_len = ht->p.key_len; in rhashtable_init_noprof()
1058 ht->p.hashfn = jhash; in rhashtable_init_noprof()
1060 if (!(ht->key_len & (sizeof(u32) - 1))) { in rhashtable_init_noprof()
1061 ht->key_len /= sizeof(u32); in rhashtable_init_noprof()
1062 ht->p.hashfn = rhashtable_jhash2; in rhashtable_init_noprof()
1071 tbl = bucket_table_alloc(ht, size, GFP_KERNEL); in rhashtable_init_noprof()
1073 size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); in rhashtable_init_noprof()
1074 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL); in rhashtable_init_noprof()
1077 atomic_set(&ht->nelems, 0); in rhashtable_init_noprof()
1079 RCU_INIT_POINTER(ht->tbl, tbl); in rhashtable_init_noprof()
1081 INIT_WORK(&ht->run_work, rht_deferred_worker); in rhashtable_init_noprof()
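
rhashtable_init_noprof() is normally reached through the rhashtable_init() allocation-profiling wrapper. Bringing a table up with the hypothetical test_params from earlier:

static struct rhashtable test_ht;

static int test_setup(void)
{
        /* Copies the params into ht->p and allocates the first bucket
         * table (GFP_KERNEL, with the __GFP_NOFAIL fallback above). */
        return rhashtable_init(&test_ht, &test_params);
}
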
1100 err = rhashtable_init_noprof(&hlt->ht, params); in rhltable_init_noprof()
1101 hlt->ht.rhlist = true; in rhltable_init_noprof()
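
rhltable_init_noprof() sets up the rhltable variant, which admits duplicate keys; entries embed a struct rhlist_head instead of a struct rhash_head. A hypothetical duplicate-key setup (names not from this file):

struct test_dup {
        int key;
        struct rhlist_head list;        /* rhash_head plus a next chain */
};

static const struct rhashtable_params dup_params = {
        .key_len     = sizeof(int),
        .key_offset  = offsetof(struct test_dup, key),
        .head_offset = offsetof(struct test_dup, list),
};

static int add_dup(struct rhltable *hlt, struct test_dup *obj)
{
        return rhltable_insert(hlt, &obj->list, dup_params);
}
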
1106 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, in rhashtable_free_one() argument
1112 if (!ht->rhlist) { in rhashtable_free_one()
1113 free_fn(rht_obj(ht, obj), arg); in rhashtable_free_one()
1120 list = rht_dereference(list->next, ht); in rhashtable_free_one()
1121 free_fn(rht_obj(ht, obj), arg); in rhashtable_free_one()
1127 * @ht: the hash table to destroy
1140 void rhashtable_free_and_destroy(struct rhashtable *ht, in rhashtable_free_and_destroy() argument
1147 cancel_work_sync(&ht->run_work); in rhashtable_free_and_destroy()
1149 mutex_lock(&ht->mutex); in rhashtable_free_and_destroy()
1150 tbl = rht_dereference(ht->tbl, ht); in rhashtable_free_and_destroy()
1159 rht_dereference(pos->next, ht) : NULL; in rhashtable_free_and_destroy()
1163 rht_dereference(pos->next, ht) : NULL) in rhashtable_free_and_destroy()
1164 rhashtable_free_one(ht, pos, free_fn, arg); in rhashtable_free_and_destroy()
1168 next_tbl = rht_dereference(tbl->future_tbl, ht); in rhashtable_free_and_destroy()
1174 mutex_unlock(&ht->mutex); in rhashtable_free_and_destroy()
1178 void rhashtable_destroy(struct rhashtable *ht) in rhashtable_destroy() argument
1180 return rhashtable_free_and_destroy(ht, NULL, NULL); in rhashtable_destroy()
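
rhashtable_destroy() is just rhashtable_free_and_destroy() without a callback; supplying one lets teardown free whatever is still resident. A sketch reusing the hypothetical test_ht (kfree needs <linux/slab.h>):

static void free_obj(void *ptr, void *arg)
{
        kfree(ptr);     /* ptr is the object; arg is the opaque cookie */
}

static void test_teardown(void)
{
        rhashtable_free_and_destroy(&test_ht, free_obj, NULL);
}
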
1225 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) in rht_bucket_nested_insert() argument
1234 ntbl = nested_table_alloc(ht, &ntbl[index].table, in rht_bucket_nested_insert()
1241 ntbl = nested_table_alloc(ht, &ntbl[index].table, in rht_bucket_nested_insert()