Lines Matching +full:array +full:- +full:nest (lib/rhashtable.c)

1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
7 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
41 return rht_head_hashfn(ht, tbl, he, ht->p);
49 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
57 if (unlikely(tbl->nest))
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
69 /* The top-level bucket entry does not need RCU protection
70 * because it's set at the same time as tbl->nest.
72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
77 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
81 ntbl = rcu_dereference_protected(ntbl->table, 1);
96 unsigned int size = tbl->size >> tbl->nest;
97 unsigned int len = 1 << tbl->nest;
111 if (tbl->nest)
133 ntbl = alloc_hooks_tag(ht->alloc_tag,
152 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
159 size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
161 tbl = alloc_hooks_tag(ht->alloc_tag,
166 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
172 tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
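A quick worked example of the nesting arithmetic above, assuming 4 KiB pages and 8-byte pointers (both are configuration dependent): shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, so one nested-table page holds 2^9 = 512 bucket pointers. For nbuckets = 65536 this gives tbl->nest = (ilog2(65536) - 1) % 9 + 1 = 7, i.e. a top level of 1 << 7 = 128 entries, each covering 65536 >> 7 = 512 buckets, exactly one page per leaf.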
186 tbl = alloc_hooks_tag(ht->alloc_tag,
200 lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
202 tbl->size = size;
204 rcu_head_init(&tbl->rcu);
205 INIT_LIST_HEAD(&tbl->walkers);
207 tbl->hash_rnd = get_random_u32();
210 INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
222 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
232 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
234 int err = -EAGAIN;
240 if (new_tbl->nest)
243 err = -ENOENT;
248 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
253 pprev = &entry->next;
261 flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
264 head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
266 RCU_INIT_POINTER(entry->next, head);
268 rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
283 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
295 if (err == -ENOENT)
312 if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
314 return -EEXIST;
321 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
327 new_tbl = rht_dereference(old_tbl->future_tbl, ht);
331 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
339 rcu_assign_pointer(ht->tbl, new_tbl);
341 spin_lock(&ht->lock);
342 list_for_each_entry(walker, &old_tbl->walkers, list)
343 walker->tbl = NULL;
350 * to check if it should not re-link the table.
352 call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
353 spin_unlock(&ht->lock);
355 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
369 return -ENOMEM;
379 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
386 * ht->mutex.
396 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
397 unsigned int nelems = atomic_read(&ht->nelems);
402 if (size < ht->p.min_size)
403 size = ht->p.min_size;
405 if (old_tbl->size <= size)
408 if (rht_dereference(old_tbl->future_tbl, ht))
409 return -EEXIST;
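The shrink path above only runs for tables created with automatic shrinking enabled, and it never goes below p.min_size. A minimal sketch of an opt-in params block follows; struct test_obj and every field value are illustrative, not taken from this file.

#include <linux/rhashtable.h>

struct test_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params test_params = {
	.key_len	     = sizeof(u32),
	.key_offset	     = offsetof(struct test_obj, key),
	.head_offset	     = offsetof(struct test_obj, node),
	.min_size	     = 16,	/* rounded up to a power of two by rhashtable_init() */
	.automatic_shrinking = true,	/* lets rht_deferred_worker() call rhashtable_shrink() */
};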
421 mutex_lock(&ht->mutex);
423 tbl = rht_dereference(ht->tbl, ht);
427 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
428 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
430 else if (tbl->nest)
431 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
433 if (!err || err == -EEXIST) {
440 mutex_unlock(&ht->mutex);
443 schedule_work(&ht->run_work);
454 old_tbl = rht_dereference_rcu(ht->tbl, ht);
456 size = tbl->size;
458 err = -EBUSY;
466 err = -ENOMEM;
475 if (err == -EEXIST)
478 schedule_work(&ht->run_work);
484 if (likely(rcu_access_pointer(tbl->future_tbl)))
488 if (err == -ENOMEM)
489 schedule_work(&ht->run_work);
512 elasticity--;
514 (ht->p.obj_cmpfn ?
515 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
517 pprev = &head->next;
521 if (!ht->rhlist)
527 RCU_INIT_POINTER(list->next, plist);
528 head = rht_dereference_bucket(head->next, tbl, hash);
529 RCU_INIT_POINTER(list->rhead.next, head);
540 return ERR_PTR(-EAGAIN);
542 return ERR_PTR(-ENOENT);
554 return ERR_PTR(-EEXIST);
556 if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
559 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
563 if (PTR_ERR(data) != -ENOENT)
567 return ERR_PTR(-E2BIG);
570 return ERR_PTR(-EAGAIN);
574 RCU_INIT_POINTER(obj->next, head);
575 if (ht->rhlist) {
579 RCU_INIT_POINTER(list->next, NULL);
600 new_tbl = rcu_dereference(ht->tbl);
604 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
605 if (rcu_access_pointer(tbl->future_tbl))
611 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
612 data = ERR_PTR(-EAGAIN);
623 atomic_inc(&ht->nelems);
624 if (PTR_ERR(new_tbl) != -EEXIST)
630 schedule_work(&ht->run_work);
634 if (PTR_ERR(data) == -EAGAIN)
636 -EAGAIN);
650 } while (PTR_ERR(data) == -EAGAIN);
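The retry loop above is what backs the *_fast insert helpers, so callers never see -EAGAIN themselves. A hedged caller-side sketch, reusing the illustrative test_obj/test_params from the shrink note; insert_one() and lookup_one() are made-up names.

#include <linux/slab.h>

static int insert_one(struct rhashtable *ht, u32 key)
{
	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return -ENOMEM;
	obj->key = key;

	/* -EEXIST on a duplicate key, -E2BIG/-ENOMEM under pressure */
	err = rhashtable_lookup_insert_fast(ht, &obj->node, test_params);
	if (err)
		kfree(obj);
	return err;
}

static void lookup_one(struct rhashtable *ht, u32 key)
{
	struct test_obj *obj;

	/* lookups take the key, not the object, and run under RCU */
	rcu_read_lock();
	obj = rhashtable_lookup(ht, &key, test_params);
	if (obj)
		pr_info("found key %u\n", obj->key);
	rcu_read_unlock();
}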
657 * rhashtable_walk_enter - Initialise an iterator
672 * non-preemptible context, but cannot be called from softirq or
679 iter->ht = ht;
680 iter->p = NULL;
681 iter->slot = 0;
682 iter->skip = 0;
683 iter->end_of_table = 0;
685 spin_lock(&ht->lock);
686 iter->walker.tbl =
687 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
688 list_add(&iter->walker.list, &iter->walker.tbl->walkers);
689 spin_unlock(&ht->lock);
694 * rhashtable_walk_exit - Free an iterator
701 spin_lock(&iter->ht->lock);
702 if (iter->walker.tbl)
703 list_del(&iter->walker.list);
704 spin_unlock(&iter->ht->lock);
709 * rhashtable_walk_start_check - Start a hash table walk
718 * Returns -EAGAIN if resize event occurred. Note that the iterator
729 struct rhashtable *ht = iter->ht;
730 bool rhlist = ht->rhlist;
734 spin_lock(&ht->lock);
735 if (iter->walker.tbl)
736 list_del(&iter->walker.list);
737 spin_unlock(&ht->lock);
739 if (iter->end_of_table)
741 if (!iter->walker.tbl) {
742 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
743 iter->slot = 0;
744 iter->skip = 0;
745 return -EAGAIN;
748 if (iter->p && !rhlist) {
755 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
757 if (p == iter->p) {
758 iter->skip = skip;
762 iter->p = NULL;
763 } else if (iter->p && rhlist) {
770 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
773 list = rcu_dereference(list->next)) {
775 if (list == iter->list) {
776 iter->p = p;
777 iter->skip = skip;
782 iter->p = NULL;
790 * __rhashtable_walk_find_next - Find the next element in a table (or the first
797 * Returns -EAGAIN if resize event occurred.
801 struct bucket_table *tbl = iter->walker.tbl;
802 struct rhlist_head *list = iter->list;
803 struct rhashtable *ht = iter->ht;
804 struct rhash_head *p = iter->p;
805 bool rhlist = ht->rhlist;
810 for (; iter->slot < tbl->size; iter->slot++) {
811 int skip = iter->skip;
813 rht_for_each_rcu(p, tbl, iter->slot) {
820 skip--;
821 list = rcu_dereference(list->next);
828 skip--;
833 iter->skip++;
834 iter->p = p;
835 iter->list = list;
836 return rht_obj(ht, rhlist ? &list->rhead : p);
839 iter->skip = 0;
842 iter->p = NULL;
847 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
848 if (iter->walker.tbl) {
849 iter->slot = 0;
850 iter->skip = 0;
851 return ERR_PTR(-EAGAIN);
853 iter->end_of_table = true;
860 * rhashtable_walk_next - Return the next object and advance the iterator
868 * Returns -EAGAIN if resize event occurred. Note that the iterator
873 struct rhlist_head *list = iter->list;
874 struct rhashtable *ht = iter->ht;
875 struct rhash_head *p = iter->p;
876 bool rhlist = ht->rhlist;
879 if (!rhlist || !(list = rcu_dereference(list->next))) {
880 p = rcu_dereference(p->next);
884 iter->skip++;
885 iter->p = p;
886 iter->list = list;
887 return rht_obj(ht, rhlist ? &list->rhead : p);
893 iter->skip = 0;
894 iter->slot++;
902 * rhashtable_walk_peek - Return the next object but don't advance the iterator
907 * Returns -EAGAIN if resize event occurred. Note that the iterator
912 struct rhlist_head *list = iter->list;
913 struct rhashtable *ht = iter->ht;
914 struct rhash_head *p = iter->p;
917 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
921 if (iter->skip) {
928 iter->skip--;
936 * rhashtable_walk_stop - Finish a hash table walk
946 struct bucket_table *tbl = iter->walker.tbl;
951 ht = iter->ht;
953 spin_lock(&ht->lock);
954 if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
955 /* This bucket table is being freed, don't re-link it. */
956 iter->walker.tbl = NULL;
958 list_add(&iter->walker.list, &tbl->walkers);
959 spin_unlock(&ht->lock);
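Taken together, the walk_enter/start/next/stop/exit fragments above form the iteration API. A minimal usage sketch follows; walk_all() and the pr_info() body are illustrative, and the -EAGAIN handling is the commonly used caller pattern rather than code from this file.

static void walk_all(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* a resize hit; entries may be seen twice */
			break;
		}
		pr_info("visiting key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}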
970 if (params->nelem_hint)
971 retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
972 (unsigned long)params->min_size);
975 (unsigned long)params->min_size);
986 * rhashtable_init - initialize a new hash table
1033 if ((!params->key_len && !params->obj_hashfn) ||
1034 (params->obj_hashfn && !params->obj_cmpfn))
1035 return -EINVAL;
1038 mutex_init(&ht->mutex);
1039 spin_lock_init(&ht->lock);
1040 memcpy(&ht->p, params, sizeof(*params));
1042 alloc_tag_record(ht->alloc_tag);
1044 if (params->min_size)
1045 ht->p.min_size = roundup_pow_of_two(params->min_size);
1048 ht->max_elems = 1u << 31;
1050 if (params->max_size) {
1051 ht->p.max_size = rounddown_pow_of_two(params->max_size);
1052 if (ht->p.max_size < ht->max_elems / 2)
1053 ht->max_elems = ht->p.max_size * 2;
1056 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1058 size = rounded_hashtable_size(&ht->p);
1060 ht->key_len = ht->p.key_len;
1061 if (!params->hashfn) {
1062 ht->p.hashfn = jhash;
1064 if (!(ht->key_len & (sizeof(u32) - 1))) {
1065 ht->key_len /= sizeof(u32);
1066 ht->p.hashfn = rhashtable_jhash2;
1077 size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1081 atomic_set(&ht->nelems, 0);
1083 RCU_INIT_POINTER(ht->tbl, tbl);
1085 INIT_WORK(&ht->run_work, rht_deferred_worker);
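To tie the init fragments together, a plausible caller sketch; my_ht and my_module_init() are hypothetical, and test_params (from the shrink note) leaves .hashfn NULL so the jhash default above is picked.

static struct rhashtable my_ht;

static int __init my_module_init(void)
{
	return rhashtable_init(&my_ht, &test_params);
}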
1092 * rhltable_init - initialize a new hash list table
1104 err = rhashtable_init_noprof(&hlt->ht, params);
1105 hlt->ht.rhlist = true;
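rhltable_init() above is the duplicate-key variant: the params are the same, but objects embed a struct rhlist_head and use the rhl* helpers. A brief sketch with illustrative names (dup_obj, dup_ht, dup_params):

struct dup_obj {
	u32 key;
	struct rhlist_head list;	/* rhlist_head instead of rhash_head */
};

static struct rhltable dup_ht;

static const struct rhashtable_params dup_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct dup_obj, key),
	.head_offset = offsetof(struct dup_obj, list),
};

static int dup_ht_setup(void)
{
	return rhltable_init(&dup_ht, &dup_params);
}

static int dup_ht_add(struct dup_obj *obj)
{
	/* duplicate keys are allowed; duplicates chain through obj->list */
	return rhltable_insert(&dup_ht, &obj->list, dup_params);
}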
1116 if (!ht->rhlist) {
1123 obj = &list->rhead;
1124 list = rht_dereference(list->next, ht);
1130 * rhashtable_free_and_destroy - free elements and destroy hash table
1138 * must occur in a compatible manner. Then frees the bucket array.
1151 cancel_work_sync(&ht->run_work);
1153 mutex_lock(&ht->mutex);
1154 tbl = rht_dereference(ht->tbl, ht);
1157 for (i = 0; i < tbl->size; i++) {
1163 rht_dereference(pos->next, ht) : NULL;
1167 rht_dereference(pos->next, ht) : NULL)
1172 next_tbl = rht_dereference(tbl->future_tbl, ht);
1178 mutex_unlock(&ht->mutex);
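The teardown fragments above walk every bucket and hand each remaining element to the caller's callback before freeing the bucket array. A minimal sketch of the usual pairing; free_test_obj() and my_module_exit() are made-up names, and my_ht is the hypothetical table from the init sketch.

static void free_test_obj(void *ptr, void *arg)
{
	kfree(ptr);	/* arg is the opaque pointer passed below (unused here) */
}

static void __exit my_module_exit(void)
{
	rhashtable_free_and_destroy(&my_ht, free_test_obj, NULL);
}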
1191 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1192 unsigned int index = hash & ((1 << tbl->nest) - 1);
1193 unsigned int size = tbl->size >> tbl->nest;
1199 subhash >>= tbl->nest;
1202 index = subhash & ((1 << shift) - 1);
1231 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1232 unsigned int index = hash & ((1 << tbl->nest) - 1);
1233 unsigned int size = tbl->size >> tbl->nest;
1237 hash >>= tbl->nest;
1242 index = hash & ((1 << shift) - 1);