rhashtable.c (3527a86b7ae17c949307d00e1eb7087604bca1b4) | rhashtable.c (49f7b33e63fec9d16e7ee62ba8f8ab4159cbdc26) |
---|---|
1/* 2 * Resizable, Scalable, Concurrent Hash Table 3 * | 1/* 2 * Resizable, Scalable, Concurrent Hash Table 3 * |
| 4 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> |
4 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> 5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 6 * | 5 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> 6 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 7 * |
7 * Based on the following paper: 8 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf 9 * | |
10 * Code partially derived from nft_hash | 8 * Code partially derived from nft_hash |
| 9 * Rewritten with rehash code from br_multicast plus single list 10 * pointer as suggested by Josh Triplett |
11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License version 2 as 14 * published by the Free Software Foundation. 15 */ 16 17#include <linux/kernel.h> 18#include <linux/init.h> 19#include <linux/log2.h> 20#include <linux/sched.h> 21#include <linux/slab.h> 22#include <linux/vmalloc.h> 23#include <linux/mm.h> 24#include <linux/jhash.h> 25#include <linux/random.h> 26#include <linux/rhashtable.h> 27#include <linux/err.h> 28 29#define HASH_DEFAULT_SIZE 64UL | 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License version 2 as 14 * published by the Free Software Foundation. 15 */ 16 17#include <linux/kernel.h> 18#include <linux/init.h> 19#include <linux/log2.h> 20#include <linux/sched.h> 21#include <linux/slab.h> 22#include <linux/vmalloc.h> 23#include <linux/mm.h> 24#include <linux/jhash.h> 25#include <linux/random.h> 26#include <linux/rhashtable.h> 27#include <linux/err.h> 28 29#define HASH_DEFAULT_SIZE 64UL |
30#define HASH_MIN_SIZE 4UL | 30#define HASH_MIN_SIZE 4U |
31#define BUCKET_LOCKS_PER_CPU 128UL 32 | 31#define BUCKET_LOCKS_PER_CPU 128UL 32 |
33/* Base bits plus 1 bit for nulls marker */ 34#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) 35 36enum { 37 RHT_LOCK_NORMAL, 38 RHT_LOCK_NESTED, 39}; 40 41/* The bucket lock is selected based on the hash and protects mutations 42 * on a group of hash buckets. 43 * 44 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that 45 * a single lock always covers both buckets which may both contains 46 * entries which link to the same bucket of the old table during resizing. 47 * This allows to simplify the locking as locking the bucket in both 48 * tables during resize always guarantee protection. 49 * 50 * IMPORTANT: When holding the bucket lock of both the old and new table 51 * during expansions and shrinking, the old bucket lock must always be 52 * acquired first. 53 */ 54static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) 55{ 56 return &tbl->locks[hash & tbl->locks_mask]; 57} 58 59static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) 60{ 61 return (void *) he - ht->p.head_offset; 62} 63 64static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) 65{ 66 return hash & (tbl->size - 1); 67} 68 69static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) 70{ 71 u32 hash; 72 73 if (unlikely(!ht->p.key_len)) 74 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); 75 else 76 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, 77 ht->p.hash_rnd); 78 79 return hash >> HASH_RESERVED_SPACE; 80} 81 82static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) 83{ 84 return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE; 85} 86 87static u32 head_hashfn(const struct rhashtable *ht, | 33static u32 head_hashfn(struct rhashtable *ht, |
88 const struct bucket_table *tbl, 89 const struct rhash_head *he) 90{ | 34 const struct bucket_table *tbl, 35 const struct rhash_head *he) 36{ |
91 return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); | 37 return rht_head_hashfn(ht, tbl, he, ht->p); |
92} 93 94#ifdef CONFIG_PROVE_LOCKING | 38} 39 40#ifdef CONFIG_PROVE_LOCKING |
95static void debug_dump_buckets(const struct rhashtable *ht, 96 const struct bucket_table *tbl) 97{ 98 struct rhash_head *he; 99 unsigned int i, hash; 100 101 for (i = 0; i < tbl->size; i++) { 102 pr_warn(" [Bucket %d] ", i); 103 rht_for_each_rcu(he, tbl, i) { 104 hash = head_hashfn(ht, tbl, he); 105 pr_cont("[hash = %#x, lock = %p] ", 106 hash, bucket_lock(tbl, hash)); 107 } 108 pr_cont("\n"); 109 } 110 111} 112 113static void debug_dump_table(struct rhashtable *ht, 114 const struct bucket_table *tbl, 115 unsigned int hash) 116{ 117 struct bucket_table *old_tbl, *future_tbl; 118 119 pr_emerg("BUG: lock for hash %#x in table %p not held\n", 120 hash, tbl); 121 122 rcu_read_lock(); 123 future_tbl = rht_dereference_rcu(ht->future_tbl, ht); 124 old_tbl = rht_dereference_rcu(ht->tbl, ht); 125 if (future_tbl != old_tbl) { 126 pr_warn("Future table %p (size: %zd)\n", 127 future_tbl, future_tbl->size); 128 debug_dump_buckets(ht, future_tbl); 129 } 130 131 pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size); 132 debug_dump_buckets(ht, old_tbl); 133 134 rcu_read_unlock(); 135} 136 | |
137#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) | 41#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) |
138#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \ 139 do { \ 140 if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \ 141 debug_dump_table(HT, TBL, HASH); \ 142 BUG(); \ 143 } \ 144 } while (0) | |
145 146int lockdep_rht_mutex_is_held(struct rhashtable *ht) 147{ 148 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; 149} 150EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); 151 152int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) 153{ | 42 43int lockdep_rht_mutex_is_held(struct rhashtable *ht) 44{ 45 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; 46} 47EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); 48 49int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) 50{ |
154 spinlock_t *lock = bucket_lock(tbl, hash); | 51 spinlock_t *lock = rht_bucket_lock(tbl, hash); |
155 156 return (debug_locks) ? lockdep_is_held(lock) : 1; 157} 158EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); 159#else 160#define ASSERT_RHT_MUTEX(HT) | 52 53 return (debug_locks) ? lockdep_is_held(lock) : 1; 54} 55EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); 56#else 57#define ASSERT_RHT_MUTEX(HT) |
161#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) | |
162#endif 163 164 | 58#endif 59 60 |
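
The locking comment above the old bucket_lock() (rht_bucket_lock() in rhashtable.h on the right-hand side) relies on the bucket count and the lock count both being powers of two, with at most tbl->size/2 locks allocated. The following stand-alone user-space sketch of that index arithmetic is illustrative only (hypothetical sizes and helper names, not kernel code):

```c
/* User-space sketch of the mask arithmetic behind bucket_lock()/
 * rht_bucket_lock(): bucket index and lock index are both simple masks
 * of the same hash value, and because nlocks <= old_size, every hash
 * that lands in old bucket i (and later in new bucket i or
 * i + old_size) maps to the same lock.
 */
#include <stdio.h>

int main(void)
{
	unsigned int old_size = 8, new_size = 16;	/* buckets, powers of two */
	unsigned int nlocks = new_size / 2;		/* at most size/2 locks   */
	unsigned int hash;

	for (hash = 0; hash < 32; hash++) {
		unsigned int old_bucket = hash & (old_size - 1);
		unsigned int new_bucket = hash & (new_size - 1);
		unsigned int lock = hash & (nlocks - 1);

		printf("hash %2u: old bucket %u, new bucket %2u, lock %u\n",
		       hash, old_bucket, new_bucket, lock);
	}
	return 0;
}
```
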
165static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) | 61static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, 62 gfp_t gfp) |
166{ | 63{ |
167 struct rhash_head __rcu **pprev; 168 169 for (pprev = &tbl->buckets[n]; 170 !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); 171 pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) 172 ; 173 174 return pprev; 175} 176 177static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) 178{ | |
179 unsigned int i, size; 180#if defined(CONFIG_PROVE_LOCKING) 181 unsigned int nr_pcpus = 2; 182#else 183 unsigned int nr_pcpus = num_possible_cpus(); 184#endif 185 186 nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); 187 size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); 188 189 /* Never allocate more than 0.5 locks per bucket */ 190 size = min_t(unsigned int, size, tbl->size >> 1); 191 192 if (sizeof(spinlock_t) != 0) { 193#ifdef CONFIG_NUMA | 64 unsigned int i, size; 65#if defined(CONFIG_PROVE_LOCKING) 66 unsigned int nr_pcpus = 2; 67#else 68 unsigned int nr_pcpus = num_possible_cpus(); 69#endif 70 71 nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); 72 size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); 73 74 /* Never allocate more than 0.5 locks per bucket */ 75 size = min_t(unsigned int, size, tbl->size >> 1); 76 77 if (sizeof(spinlock_t) != 0) { 78#ifdef CONFIG_NUMA |
194 if (size * sizeof(spinlock_t) > PAGE_SIZE) | 79 if (size * sizeof(spinlock_t) > PAGE_SIZE && 80 gfp == GFP_KERNEL) |
195 tbl->locks = vmalloc(size * sizeof(spinlock_t)); 196 else 197#endif 198 tbl->locks = kmalloc_array(size, sizeof(spinlock_t), | 81 tbl->locks = vmalloc(size * sizeof(spinlock_t)); 82 else 83#endif 84 tbl->locks = kmalloc_array(size, sizeof(spinlock_t), |
199 GFP_KERNEL); | 85 gfp); |
200 if (!tbl->locks) 201 return -ENOMEM; 202 for (i = 0; i < size; i++) 203 spin_lock_init(&tbl->locks[i]); 204 } 205 tbl->locks_mask = size - 1; 206 207 return 0; 208} 209 210static void bucket_table_free(const struct bucket_table *tbl) 211{ 212 if (tbl) 213 kvfree(tbl->locks); 214 215 kvfree(tbl); 216} 217 | 86 if (!tbl->locks) 87 return -ENOMEM; 88 for (i = 0; i < size; i++) 89 spin_lock_init(&tbl->locks[i]); 90 } 91 tbl->locks_mask = size - 1; 92 93 return 0; 94} 95 96static void bucket_table_free(const struct bucket_table *tbl) 97{ 98 if (tbl) 99 kvfree(tbl->locks); 100 101 kvfree(tbl); 102} 103 |
| 104static void bucket_table_free_rcu(struct rcu_head *head) 105{ 106 bucket_table_free(container_of(head, struct bucket_table, rcu)); 107} 108 |
218static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | 109static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, |
219 size_t nbuckets) | 110 size_t nbuckets, 111 gfp_t gfp) |
220{ 221 struct bucket_table *tbl = NULL; 222 size_t size; 223 int i; 224 225 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | 112{ 113 struct bucket_table *tbl = NULL; 114 size_t size; 115 int i; 116 117 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); |
226 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 227 tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 228 if (tbl == NULL) | 118 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) || 119 gfp != GFP_KERNEL) 120 tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); 121 if (tbl == NULL && gfp == GFP_KERNEL) |
229 tbl = vzalloc(size); 230 if (tbl == NULL) 231 return NULL; 232 233 tbl->size = nbuckets; 234 | 122 tbl = vzalloc(size); 123 if (tbl == NULL) 124 return NULL; 125 126 tbl->size = nbuckets; 127 |
235 if (alloc_bucket_locks(ht, tbl) < 0) { | 128 if (alloc_bucket_locks(ht, tbl, gfp) < 0) { |
236 bucket_table_free(tbl); 237 return NULL; 238 } 239 | 129 bucket_table_free(tbl); 130 return NULL; 131 } 132 |
| 133 INIT_LIST_HEAD(&tbl->walkers); 134 135 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); 136 |
240 for (i = 0; i < nbuckets; i++) 241 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); 242 243 return tbl; 244} 245 | 137 for (i = 0; i < nbuckets; i++) 138 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); 139 140 return tbl; 141} 142 |
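
bucket_table_alloc() terminates every empty bucket with a nulls marker via INIT_RHT_NULLS_HEAD() rather than a plain NULL, and on the right-hand side also seeds a per-table hash_rnd. The sketch below illustrates the nulls-marker idea in user space; the encoding shown is a simplified stand-in, not the exact layout used by rhashtable.h:

```c
/* Minimal user-space sketch of the "nulls marker" idea: an empty bucket
 * is terminated by an odd, non-pointer value that also records which
 * bucket the chain belongs to, so an RCU reader that raced with a
 * rehash can detect it finished on the wrong chain and retry.
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t make_nulls(uint32_t hash)
{
	return ((uintptr_t)hash << 1) | 1UL;	/* bit 0 set => not a real pointer */
}

static int is_a_nulls(uintptr_t ptr)
{
	return ptr & 1UL;
}

int main(void)
{
	uintptr_t end = make_nulls(42);

	printf("marker %#lx, is_a_nulls=%d, bucket=%lu\n",
	       (unsigned long)end, is_a_nulls(end),
	       (unsigned long)(end >> 1));
	return 0;
}
```
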
246/** 247 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size 248 * @ht: hash table 249 * @new_size: new table size 250 */ 251static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) | 143static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, 144 struct bucket_table *tbl) |
252{ | 145{ |
253 /* Expand table when exceeding 75% load */ 254 return atomic_read(&ht->nelems) > (new_size / 4 * 3) && 255 (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift); 256} | 146 struct bucket_table *new_tbl; |
257 | 147 |
258/** 259 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size 260 * @ht: hash table 261 * @new_size: new table size 262 */ 263static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) 264{ 265 /* Shrink table beneath 30% load */ 266 return atomic_read(&ht->nelems) < (new_size * 3 / 10) && 267 (atomic_read(&ht->shift) > ht->p.min_shift); 268} | 148 do { 149 new_tbl = tbl; 150 tbl = rht_dereference_rcu(tbl->future_tbl, ht); 151 } while (tbl); |
269 | 152 |
270static void lock_buckets(struct bucket_table *new_tbl, 271 struct bucket_table *old_tbl, unsigned int hash) 272 __acquires(old_bucket_lock) 273{ 274 spin_lock_bh(bucket_lock(old_tbl, hash)); 275 if (new_tbl != old_tbl) 276 spin_lock_bh_nested(bucket_lock(new_tbl, hash), 277 RHT_LOCK_NESTED); | 153 return new_tbl; |
278} 279 | 154} 155 |
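
The left-hand rht_grow_above_75()/rht_shrink_below_30() helpers (inlined into rhashtable.h on the right-hand side) encode the resize watermarks: grow above 75% load, shrink below 30%. A small stand-alone sketch of the arithmetic, with illustrative sizes:

```c
/* Stand-alone sketch of the resize watermarks: grow when the table is
 * more than 75% full, shrink (if enabled) when it drops below 30%.
 */
#include <stdbool.h>
#include <stdio.h>

static bool grow_above_75(unsigned int nelems, unsigned int size)
{
	return nelems > size / 4 * 3;
}

static bool shrink_below_30(unsigned int nelems, unsigned int size)
{
	return nelems < size * 3 / 10;
}

int main(void)
{
	unsigned int size = 64;

	printf("size %u: grow above %u entries, shrink below %u entries\n",
	       size, size / 4 * 3, size * 3 / 10);
	printf("49 entries: grow=%d shrink=%d\n",
	       grow_above_75(49, size), shrink_below_30(49, size));
	return 0;
}
```
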
280static void unlock_buckets(struct bucket_table *new_tbl, 281 struct bucket_table *old_tbl, unsigned int hash) 282 __releases(old_bucket_lock) | 156static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) |
283{ | 157{ |
284 if (new_tbl != old_tbl) 285 spin_unlock_bh(bucket_lock(new_tbl, hash)); 286 spin_unlock_bh(bucket_lock(old_tbl, hash)); | 158 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 159 struct bucket_table *new_tbl = rhashtable_last_table(ht, 160 rht_dereference_rcu(old_tbl->future_tbl, ht)); 161 struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash]; 162 int err = -ENOENT; 163 struct rhash_head *head, *next, *entry; 164 spinlock_t *new_bucket_lock; 165 unsigned int new_hash; 166 167 rht_for_each(entry, old_tbl, old_hash) { 168 err = 0; 169 next = rht_dereference_bucket(entry->next, old_tbl, old_hash); 170 171 if (rht_is_a_nulls(next)) 172 break; 173 174 pprev = &entry->next; 175 } 176 177 if (err) 178 goto out; 179 180 new_hash = head_hashfn(ht, new_tbl, entry); 181 182 new_bucket_lock = rht_bucket_lock(new_tbl, new_hash); 183 184 spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING); 185 head = rht_dereference_bucket(new_tbl->buckets[new_hash], 186 new_tbl, new_hash); 187 188 if (rht_is_a_nulls(head)) 189 INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash); 190 else 191 RCU_INIT_POINTER(entry->next, head); 192 193 rcu_assign_pointer(new_tbl->buckets[new_hash], entry); 194 spin_unlock(new_bucket_lock); 195 196 rcu_assign_pointer(*pprev, next); 197 198out: 199 return err; |
287} 288 | 200} 201 |
289/** 290 * Unlink entries on bucket which hash to different bucket. 291 * 292 * Returns true if no more work needs to be performed on the bucket. 293 */ 294static bool hashtable_chain_unzip(struct rhashtable *ht, 295 const struct bucket_table *new_tbl, 296 struct bucket_table *old_tbl, 297 size_t old_hash) | 202static void rhashtable_rehash_chain(struct rhashtable *ht, 203 unsigned int old_hash) |
298{ | 204{ |
299 struct rhash_head *he, *p, *next; 300 unsigned int new_hash, new_hash2; | 205 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 206 spinlock_t *old_bucket_lock; |
301 | 207 |
302 ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash); | 208 old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); |
303 | 209 |
304 /* Old bucket empty, no work needed. */ 305 p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, 306 old_hash); 307 if (rht_is_a_nulls(p)) 308 return false; | 210 spin_lock_bh(old_bucket_lock); 211 while (!rhashtable_rehash_one(ht, old_hash)) 212 ; 213 old_tbl->rehash++; 214 spin_unlock_bh(old_bucket_lock); 215} |
309 | 216 |
310 new_hash = head_hashfn(ht, new_tbl, p); 311 ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); | 217static int rhashtable_rehash_attach(struct rhashtable *ht, 218 struct bucket_table *old_tbl, 219 struct bucket_table *new_tbl) 220{ 221 /* Protect future_tbl using the first bucket lock. */ 222 spin_lock_bh(old_tbl->locks); |
312 | 223 |
313 /* Advance the old bucket pointer one or more times until it 314 * reaches a node that doesn't hash to the same bucket as the 315 * previous node p. Call the previous node p; 316 */ 317 rht_for_each_continue(he, p->next, old_tbl, old_hash) { 318 new_hash2 = head_hashfn(ht, new_tbl, he); 319 ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2); 320 321 if (new_hash != new_hash2) 322 break; 323 p = he; | 224 /* Did somebody beat us to it? */ 225 if (rcu_access_pointer(old_tbl->future_tbl)) { 226 spin_unlock_bh(old_tbl->locks); 227 return -EEXIST; |
324 } | 228 } |
325 rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); | |
326 | 229 |
327 /* Find the subsequent node which does hash to the same 328 * bucket as node P, or NULL if no such node exists. | 230 /* Make insertions go into the new, empty table right away. Deletions 231 * and lookups will be attempted in both tables until we synchronize. |
329 */ | 232 */ |
330 INIT_RHT_NULLS_HEAD(next, ht, old_hash); 331 if (!rht_is_a_nulls(he)) { 332 rht_for_each_continue(he, he->next, old_tbl, old_hash) { 333 if (head_hashfn(ht, new_tbl, he) == new_hash) { 334 next = he; 335 break; 336 } 337 } 338 } | 233 rcu_assign_pointer(old_tbl->future_tbl, new_tbl); |
339 | 234 |
340 /* Set p's next pointer to that subsequent node pointer, 341 * bypassing the nodes which do not hash to p's bucket 342 */ 343 rcu_assign_pointer(p->next, next); | 235 /* Ensure the new table is visible to readers. */ 236 smp_wmb(); |
344 | 237 |
345 p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, 346 old_hash); | 238 spin_unlock_bh(old_tbl->locks); |
347 | 239 |
348 return !rht_is_a_nulls(p); | 240 return 0; |
349} 350 | 241} 242 |
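
The right-hand rhashtable_rehash_one() replaces the old unzip scheme: it unlinks the last entry of the old chain and pushes it onto the head of its new bucket, so the remainder of the old chain stays walkable for concurrent RCU readers (the "single list pointer" approach credited to Josh Triplett in the header). Below is a simplified user-space model with plain lists, a single destination bucket, and no locking or RCU:

```c
/* User-space sketch of the rehash step: unlink the *last* entry of the
 * old chain and push it onto the head of the new bucket. Repeating
 * until the old chain is empty migrates the bucket while the remaining
 * old chain stays intact for readers, and order is preserved.
 */
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Move the tail of *old_head to the front of *new_head; return 0, or
 * -1 when the old chain is already empty.
 */
static int rehash_one(struct node **old_head, struct node **new_head)
{
	struct node **pprev = old_head;
	struct node *entry = *old_head;

	if (!entry)
		return -1;

	while (entry->next) {		/* walk to the last entry */
		pprev = &entry->next;
		entry = entry->next;
	}

	*pprev = NULL;			/* unlink it from the old chain */
	entry->next = *new_head;	/* and push it onto the new one */
	*new_head = entry;
	return 0;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *old_bucket = &a, *new_bucket = NULL;

	while (!rehash_one(&old_bucket, &new_bucket))
		;

	for (struct node *n = new_bucket; n; n = n->next)
		printf("%d ", n->val);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}
```
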
351static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl, 352 unsigned int new_hash, struct rhash_head *entry) | 243static int rhashtable_rehash_table(struct rhashtable *ht) |
353{ | 244{ |
354 ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); | 245 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 246 struct bucket_table *new_tbl; 247 struct rhashtable_walker *walker; 248 unsigned int old_hash; |
355 | 249 |
356 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); | 250 new_tbl = rht_dereference(old_tbl->future_tbl, ht); 251 if (!new_tbl) 252 return 0; 253 254 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) 255 rhashtable_rehash_chain(ht, old_hash); 256 257 /* Publish the new table pointer. */ 258 rcu_assign_pointer(ht->tbl, new_tbl); 259 260 spin_lock(&ht->lock); 261 list_for_each_entry(walker, &old_tbl->walkers, list) 262 walker->tbl = NULL; 263 spin_unlock(&ht->lock); 264 265 /* Wait for readers. All new readers will see the new 266 * table, and thus no references to the old table will 267 * remain. 268 */ 269 call_rcu(&old_tbl->rcu, bucket_table_free_rcu); 270 271 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; |
357} 358 359/** 360 * rhashtable_expand - Expand hash table while allowing concurrent lookups 361 * @ht: the hash table to expand 362 * | 272} 273 274/** 275 * rhashtable_expand - Expand hash table while allowing concurrent lookups 276 * @ht: the hash table to expand 277 * |
363 * A secondary bucket array is allocated and the hash entries are migrated 364 * while keeping them on both lists until the end of the RCU grace period. | 278 * A secondary bucket array is allocated and the hash entries are migrated. |
365 * 366 * This function may only be called in a context where it is safe to call 367 * synchronize_rcu(), e.g. not within a rcu_read_lock() section. 368 * 369 * The caller must ensure that no concurrent resizing occurs by holding 370 * ht->mutex. 371 * 372 * It is valid to have concurrent insertions and deletions protected by per 373 * bucket locks or concurrent RCU protected lookups and traversals. 374 */ | 279 * 280 * This function may only be called in a context where it is safe to call 281 * synchronize_rcu(), e.g. not within a rcu_read_lock() section. 282 * 283 * The caller must ensure that no concurrent resizing occurs by holding 284 * ht->mutex. 285 * 286 * It is valid to have concurrent insertions and deletions protected by per 287 * bucket locks or concurrent RCU protected lookups and traversals. 288 */ |
375int rhashtable_expand(struct rhashtable *ht) | 289static int rhashtable_expand(struct rhashtable *ht) |
376{ 377 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 290{ 291 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); |
378 struct rhash_head *he; 379 unsigned int new_hash, old_hash; 380 bool complete = false; | 292 int err; |
381 382 ASSERT_RHT_MUTEX(ht); 383 | 293 294 ASSERT_RHT_MUTEX(ht); 295 |
384 new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); | 296 old_tbl = rhashtable_last_table(ht, old_tbl); 297 298 new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL); |
385 if (new_tbl == NULL) 386 return -ENOMEM; 387 | 299 if (new_tbl == NULL) 300 return -ENOMEM; 301 |
388 atomic_inc(&ht->shift); | 302 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); 303 if (err) 304 bucket_table_free(new_tbl); |
389 | 305 |
390 /* Make insertions go into the new, empty table right away. Deletions 391 * and lookups will be attempted in both tables until we synchronize. 392 * The synchronize_rcu() guarantees for the new table to be picked up 393 * so no new additions go into the old table while we relink. 394 */ 395 rcu_assign_pointer(ht->future_tbl, new_tbl); 396 synchronize_rcu(); 397 398 /* For each new bucket, search the corresponding old bucket for the 399 * first entry that hashes to the new bucket, and link the end of 400 * newly formed bucket chain (containing entries added to future 401 * table) to that entry. Since all the entries which will end up in 402 * the new bucket appear in the same old bucket, this constructs an 403 * entirely valid new hash table, but with multiple buckets 404 * "zipped" together into a single imprecise chain. 405 */ 406 for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { 407 old_hash = rht_bucket_index(old_tbl, new_hash); 408 lock_buckets(new_tbl, old_tbl, new_hash); 409 rht_for_each(he, old_tbl, old_hash) { 410 if (head_hashfn(ht, new_tbl, he) == new_hash) { 411 link_old_to_new(ht, new_tbl, new_hash, he); 412 break; 413 } 414 } 415 unlock_buckets(new_tbl, old_tbl, new_hash); 416 cond_resched(); 417 } 418 419 /* Unzip interleaved hash chains */ 420 while (!complete && !ht->being_destroyed) { 421 /* Wait for readers. All new readers will see the new 422 * table, and thus no references to the old table will 423 * remain. 424 */ 425 synchronize_rcu(); 426 427 /* For each bucket in the old table (each of which 428 * contains items from multiple buckets of the new 429 * table): ... 430 */ 431 complete = true; 432 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { 433 lock_buckets(new_tbl, old_tbl, old_hash); 434 435 if (hashtable_chain_unzip(ht, new_tbl, old_tbl, 436 old_hash)) 437 complete = false; 438 439 unlock_buckets(new_tbl, old_tbl, old_hash); 440 cond_resched(); 441 } 442 } 443 444 rcu_assign_pointer(ht->tbl, new_tbl); 445 synchronize_rcu(); 446 447 bucket_table_free(old_tbl); 448 return 0; | 306 return err; |
449} | 307} |
450EXPORT_SYMBOL_GPL(rhashtable_expand); | |
451 452/** 453 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups 454 * @ht: the hash table to shrink 455 * | 308 309/** 310 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups 311 * @ht: the hash table to shrink 312 * |
456 * This function may only be called in a context where it is safe to call 457 * synchronize_rcu(), e.g. not within a rcu_read_lock() section. | 313 * This function shrinks the hash table to fit, i.e., the smallest 314 * size that would not cause it to expand right away automatically. |
458 * 459 * The caller must ensure that no concurrent resizing occurs by holding 460 * ht->mutex. 461 * 462 * The caller must ensure that no concurrent table mutations take place. 463 * It is however valid to have concurrent lookups if they are RCU protected. 464 * 465 * It is valid to have concurrent insertions and deletions protected by per 466 * bucket locks or concurrent RCU protected lookups and traversals. 467 */ | 315 * 316 * The caller must ensure that no concurrent resizing occurs by holding 317 * ht->mutex. 318 * 319 * The caller must ensure that no concurrent table mutations take place. 320 * It is however valid to have concurrent lookups if they are RCU protected. 321 * 322 * It is valid to have concurrent insertions and deletions protected by per 323 * bucket locks or concurrent RCU protected lookups and traversals. 324 */ |
468int rhashtable_shrink(struct rhashtable *ht) | 325static int rhashtable_shrink(struct rhashtable *ht) |
469{ | 326{ |
470 struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); 471 unsigned int new_hash; | 327 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 328 unsigned int size; 329 int err; |
472 473 ASSERT_RHT_MUTEX(ht); 474 | 330 331 ASSERT_RHT_MUTEX(ht); 332 |
475 new_tbl = bucket_table_alloc(ht, tbl->size / 2); 476 if (new_tbl == NULL) 477 return -ENOMEM; | 333 size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); 334 if (size < ht->p.min_size) 335 size = ht->p.min_size; |
478 | 336 |
479 rcu_assign_pointer(ht->future_tbl, new_tbl); 480 synchronize_rcu(); | 337 if (old_tbl->size <= size) 338 return 0; |
481 | 339 |
482 /* Link the first entry in the old bucket to the end of the 483 * bucket in the new table. As entries are concurrently being 484 * added to the new table, lock down the new bucket. As we 485 * always divide the size in half when shrinking, each bucket 486 * in the new table maps to exactly two buckets in the old 487 * table. 488 */ 489 for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { 490 lock_buckets(new_tbl, tbl, new_hash); | 340 if (rht_dereference(old_tbl->future_tbl, ht)) 341 return -EEXIST; |
491 | 342 |
492 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), 493 tbl->buckets[new_hash]); 494 ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size); 495 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), 496 tbl->buckets[new_hash + new_tbl->size]); | 343 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); 344 if (new_tbl == NULL) 345 return -ENOMEM; |
497 | 346 |
498 unlock_buckets(new_tbl, tbl, new_hash); 499 cond_resched(); 500 } | 347 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); 348 if (err) 349 bucket_table_free(new_tbl); |
501 | 350 |
502 /* Publish the new, valid hash table */ 503 rcu_assign_pointer(ht->tbl, new_tbl); 504 atomic_dec(&ht->shift); 505 506 /* Wait for readers. No new readers will have references to the 507 * old hash table. 508 */ 509 synchronize_rcu(); 510 511 bucket_table_free(tbl); 512 513 return 0; | 351 return err; |
514} | 352} |
515EXPORT_SYMBOL_GPL(rhashtable_shrink); | |
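
The rewritten rhashtable_shrink() picks roundup_pow_of_two(nelems * 3 / 2), clamped to min_size, as the target: the smallest power of two that stays at or below the 75% grow watermark, so a shrink never triggers an immediate re-expansion. A stand-alone sketch with illustrative values:

```c
/* Sketch of the shrink target computed above. Values are illustrative;
 * roundup_pow_of_two() here is a plain user-space reimplementation.
 */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long min_size = 4;
	unsigned long nelems;

	for (nelems = 1; nelems <= 40; nelems += 13) {
		unsigned long size = roundup_pow_of_two(nelems * 3 / 2);

		if (size < min_size)
			size = min_size;
		/* nelems stays at or below the 75% grow watermark that
		 * rht_grow_above_75() checks for this size.
		 */
		printf("nelems %2lu -> size %2lu (75%% mark: %lu)\n",
		       nelems, size, size / 4 * 3);
	}
	return 0;
}
```
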
516 517static void rht_deferred_worker(struct work_struct *work) 518{ 519 struct rhashtable *ht; 520 struct bucket_table *tbl; | 353 354static void rht_deferred_worker(struct work_struct *work) 355{ 356 struct rhashtable *ht; 357 struct bucket_table *tbl; |
521 struct rhashtable_walker *walker; | 358 int err = 0; |
522 523 ht = container_of(work, struct rhashtable, run_work); 524 mutex_lock(&ht->mutex); | 359 360 ht = container_of(work, struct rhashtable, run_work); 361 mutex_lock(&ht->mutex); |
525 if (ht->being_destroyed) 526 goto unlock; | |
527 528 tbl = rht_dereference(ht->tbl, ht); | 362 363 tbl = rht_dereference(ht->tbl, ht); |
| 364 tbl = rhashtable_last_table(ht, tbl); |
529 | 365 |
530 list_for_each_entry(walker, &ht->walkers, list) 531 walker->resize = true; 532 533 if (rht_grow_above_75(ht, tbl->size)) | 366 if (rht_grow_above_75(ht, tbl)) |
534 rhashtable_expand(ht); | 367 rhashtable_expand(ht); |
535 else if (rht_shrink_below_30(ht, tbl->size)) | 368 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) |
536 rhashtable_shrink(ht); | 369 rhashtable_shrink(ht); |
537unlock: 538 mutex_unlock(&ht->mutex); 539} | |
540 | 370 |
541static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, 542 struct bucket_table *tbl, 543 const struct bucket_table *old_tbl, u32 hash) 544{ 545 bool no_resize_running = tbl == old_tbl; 546 struct rhash_head *head; | 371 err = rhashtable_rehash_table(ht); |
547 | 372 |
548 hash = rht_bucket_index(tbl, hash); 549 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); | 373 mutex_unlock(&ht->mutex); |
550 | 374 |
551 ASSERT_BUCKET_LOCK(ht, tbl, hash); 552 553 if (rht_is_a_nulls(head)) 554 INIT_RHT_NULLS_HEAD(obj->next, ht, hash); 555 else 556 RCU_INIT_POINTER(obj->next, head); 557 558 rcu_assign_pointer(tbl->buckets[hash], obj); 559 560 atomic_inc(&ht->nelems); 561 if (no_resize_running && rht_grow_above_75(ht, tbl->size)) | 375 if (err) |
562 schedule_work(&ht->run_work); 563} 564 | 376 schedule_work(&ht->run_work); 377} 378 |
565/** 566 * rhashtable_insert - insert object into hash table 567 * @ht: hash table 568 * @obj: pointer to hash head inside object 569 * 570 * Will take a per bucket spinlock to protect against mutual mutations 571 * on the same bucket. Multiple insertions may occur in parallel unless 572 * they map to the same bucket lock. 573 * 574 * It is safe to call this function from atomic context. 575 * 576 * Will trigger an automatic deferred table resizing if the size grows 577 * beyond the watermark indicated by grow_decision() which can be passed 578 * to rhashtable_init(). 579 */ 580void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) | 379static bool rhashtable_check_elasticity(struct rhashtable *ht, 380 struct bucket_table *tbl, 381 unsigned int hash) |
581{ | 382{ |
582 struct bucket_table *tbl, *old_tbl; 583 unsigned hash; | 383 unsigned int elasticity = ht->elasticity; 384 struct rhash_head *head; |
584 | 385 |
585 rcu_read_lock(); | 386 rht_for_each(head, tbl, hash) 387 if (!--elasticity) 388 return true; |
586 | 389 |
587 tbl = rht_dereference_rcu(ht->future_tbl, ht); 588 old_tbl = rht_dereference_rcu(ht->tbl, ht); 589 hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); 590 591 lock_buckets(tbl, old_tbl, hash); 592 __rhashtable_insert(ht, obj, tbl, old_tbl, hash); 593 unlock_buckets(tbl, old_tbl, hash); 594 595 rcu_read_unlock(); | 390 return false; |
596} | 391} |
597EXPORT_SYMBOL_GPL(rhashtable_insert); | |
598 | 392 |
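
rhashtable_check_elasticity() on the right-hand side bounds the number of chain entries inspected on insert (16 by default, see rhashtable_init() further down); exceeding the bound makes the caller force a rehash, which defends against hash-collision attacks. A user-space sketch of that bounded walk, with plain lists and made-up node types:

```c
/* Stand-alone sketch of the elasticity check: give up after inspecting
 * a bounded number of chain entries and let the caller rehash instead.
 */
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static bool chain_longer_than(const struct node *head, unsigned int elasticity)
{
	const struct node *n;

	for (n = head; n; n = n->next)
		if (!--elasticity)
			return true;	/* chain too long: likely under attack */

	return false;
}

int main(void)
{
	struct node nodes[20];
	int i;

	for (i = 0; i < 19; i++)
		nodes[i].next = &nodes[i + 1];
	nodes[19].next = NULL;

	return chain_longer_than(&nodes[0], 16) ? 0 : 1;	/* 20 > 16 */
}
```
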
599/** 600 * rhashtable_remove - remove object from hash table 601 * @ht: hash table 602 * @obj: pointer to hash head inside object 603 * 604 * Since the hash chain is single linked, the removal operation needs to 605 * walk the bucket chain upon removal. The removal operation is thus 606 * considerable slow if the hash table is not correctly sized. 607 * 608 * Will automatically shrink the table via rhashtable_expand() if the 609 * shrink_decision function specified at rhashtable_init() returns true. 610 * 611 * The caller must ensure that no concurrent table mutations occur. It is 612 * however valid to have concurrent lookups if they are RCU protected. 613 */ 614bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) | 393int rhashtable_insert_rehash(struct rhashtable *ht) |
615{ | 394{ |
616 struct bucket_table *tbl, *new_tbl, *old_tbl; 617 struct rhash_head __rcu **pprev; 618 struct rhash_head *he, *he2; 619 unsigned int hash, new_hash; 620 bool ret = false; | 395 struct bucket_table *old_tbl; 396 struct bucket_table *new_tbl; 397 struct bucket_table *tbl; 398 unsigned int size; 399 int err; |
621 | 400 |
622 rcu_read_lock(); | |
623 old_tbl = rht_dereference_rcu(ht->tbl, ht); | 401 old_tbl = rht_dereference_rcu(ht->tbl, ht); |
624 tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht); 625 new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); | 402 tbl = rhashtable_last_table(ht, old_tbl); |
626 | 403 |
627 lock_buckets(new_tbl, old_tbl, new_hash); 628restart: 629 hash = rht_bucket_index(tbl, new_hash); 630 pprev = &tbl->buckets[hash]; 631 rht_for_each(he, tbl, hash) { 632 if (he != obj) { 633 pprev = &he->next; 634 continue; 635 } | 404 size = tbl->size; |
636 | 405 |
637 ASSERT_BUCKET_LOCK(ht, tbl, hash); | 406 if (rht_grow_above_75(ht, tbl)) 407 size *= 2; 408 /* More than two rehashes (not resizes) detected. */ 409 else if (WARN_ON(old_tbl != tbl && old_tbl->size == size)) 410 return -EBUSY; |
638 | 411 |
639 if (old_tbl->size > new_tbl->size && tbl == old_tbl && 640 !rht_is_a_nulls(obj->next) && 641 head_hashfn(ht, tbl, obj->next) != hash) { 642 rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); 643 } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) { 644 rht_for_each_continue(he2, obj->next, tbl, hash) { 645 if (head_hashfn(ht, tbl, he2) == hash) { 646 rcu_assign_pointer(*pprev, he2); 647 goto found; 648 } 649 } | 412 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); 413 if (new_tbl == NULL) 414 return -ENOMEM; |
650 | 415 |
651 rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); 652 } else { 653 rcu_assign_pointer(*pprev, obj->next); 654 } | 416 err = rhashtable_rehash_attach(ht, tbl, new_tbl); 417 if (err) { 418 bucket_table_free(new_tbl); 419 if (err == -EEXIST) 420 err = 0; 421 } else 422 schedule_work(&ht->run_work); |
655 | 423 |
656found: 657 ret = true; 658 break; 659 } 660 661 /* The entry may be linked in either 'tbl', 'future_tbl', or both. 662 * 'future_tbl' only exists for a short period of time during 663 * resizing. Thus traversing both is fine and the added cost is 664 * very rare. 665 */ 666 if (tbl != old_tbl) { 667 tbl = old_tbl; 668 goto restart; 669 } 670 671 unlock_buckets(new_tbl, old_tbl, new_hash); 672 673 if (ret) { 674 bool no_resize_running = new_tbl == old_tbl; 675 676 atomic_dec(&ht->nelems); 677 if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size)) 678 schedule_work(&ht->run_work); 679 } 680 681 rcu_read_unlock(); 682 683 return ret; | 424 return err; |
684} | 425} |
685EXPORT_SYMBOL_GPL(rhashtable_remove); | 426EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); |
686 | 427 |
687struct rhashtable_compare_arg { 688 struct rhashtable *ht; 689 const void *key; 690}; 691 692static bool rhashtable_compare(void *ptr, void *arg) | 428int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 429 struct rhash_head *obj, 430 struct bucket_table *tbl) |
693{ | 431{ |
694 struct rhashtable_compare_arg *x = arg; 695 struct rhashtable *ht = x->ht; | 432 struct rhash_head *head; 433 unsigned int hash; 434 int err; |
696 | 435 |
697 return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len); 698} | 436 tbl = rhashtable_last_table(ht, tbl); 437 hash = head_hashfn(ht, tbl, obj); 438 spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); |
699 | 439 |
700/** 701 * rhashtable_lookup - lookup key in hash table 702 * @ht: hash table 703 * @key: pointer to key 704 * 705 * Computes the hash value for the key and traverses the bucket chain looking 706 * for a entry with an identical key. The first matching entry is returned. 707 * 708 * This lookup function may only be used for fixed key hash table (key_len 709 * parameter set). It will BUG() if used inappropriately. 710 * 711 * Lookups may occur in parallel with hashtable mutations and resizing. 712 */ 713void *rhashtable_lookup(struct rhashtable *ht, const void *key) 714{ 715 struct rhashtable_compare_arg arg = { 716 .ht = ht, 717 .key = key, 718 }; | 440 err = -EEXIST; 441 if (key && rhashtable_lookup_fast(ht, key, ht->p)) 442 goto exit; |
719 | 443 |
720 BUG_ON(!ht->p.key_len); | 444 err = -EAGAIN; 445 if (rhashtable_check_elasticity(ht, tbl, hash) || 446 rht_grow_above_100(ht, tbl)) 447 goto exit; |
721 | 448 |
722 return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg); 723} 724EXPORT_SYMBOL_GPL(rhashtable_lookup); | 449 err = 0; |
725 | 450 |
726/** 727 * rhashtable_lookup_compare - search hash table with compare function 728 * @ht: hash table 729 * @key: the pointer to the key 730 * @compare: compare function, must return true on match 731 * @arg: argument passed on to compare function 732 * 733 * Traverses the bucket chain behind the provided hash value and calls the 734 * specified compare function for each entry. 735 * 736 * Lookups may occur in parallel with hashtable mutations and resizing. 737 * 738 * Returns the first entry on which the compare function returned true. 739 */ 740void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, 741 bool (*compare)(void *, void *), void *arg) 742{ 743 const struct bucket_table *tbl, *old_tbl; 744 struct rhash_head *he; 745 u32 hash; | 451 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); |
746 | 452 |
747 rcu_read_lock(); | 453 RCU_INIT_POINTER(obj->next, head); |
748 | 454 |
749 old_tbl = rht_dereference_rcu(ht->tbl, ht); 750 tbl = rht_dereference_rcu(ht->future_tbl, ht); 751 hash = key_hashfn(ht, key, ht->p.key_len); 752restart: 753 rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { 754 if (!compare(rht_obj(ht, he), arg)) 755 continue; 756 rcu_read_unlock(); 757 return rht_obj(ht, he); 758 } | 455 rcu_assign_pointer(tbl->buckets[hash], obj); |
759 | 456 |
760 if (unlikely(tbl != old_tbl)) { 761 tbl = old_tbl; 762 goto restart; 763 } 764 rcu_read_unlock(); | 457 atomic_inc(&ht->nelems); |
765 | 458 |
766 return NULL; 767} 768EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); 769 770/** 771 * rhashtable_lookup_insert - lookup and insert object into hash table 772 * @ht: hash table 773 * @obj: pointer to hash head inside object 774 * 775 * Locks down the bucket chain in both the old and new table if a resize 776 * is in progress to ensure that writers can't remove from the old table 777 * and can't insert to the new table during the atomic operation of search 778 * and insertion. Searches for duplicates in both the old and new table if 779 * a resize is in progress. 780 * 781 * This lookup function may only be used for fixed key hash table (key_len 782 * parameter set). It will BUG() if used inappropriately. 783 * 784 * It is safe to call this function from atomic context. 785 * 786 * Will trigger an automatic deferred table resizing if the size grows 787 * beyond the watermark indicated by grow_decision() which can be passed 788 * to rhashtable_init(). 789 */ 790bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) 791{ 792 struct rhashtable_compare_arg arg = { 793 .ht = ht, 794 .key = rht_obj(ht, obj) + ht->p.key_offset, 795 }; 796 797 BUG_ON(!ht->p.key_len); 798 799 return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare, 800 &arg); 801} 802EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); 803 804/** 805 * rhashtable_lookup_compare_insert - search and insert object to hash table 806 * with compare function 807 * @ht: hash table 808 * @obj: pointer to hash head inside object 809 * @compare: compare function, must return true on match 810 * @arg: argument passed on to compare function 811 * 812 * Locks down the bucket chain in both the old and new table if a resize 813 * is in progress to ensure that writers can't remove from the old table 814 * and can't insert to the new table during the atomic operation of search 815 * and insertion. Searches for duplicates in both the old and new table if 816 * a resize is in progress. 817 * 818 * Lookups may occur in parallel with hashtable mutations and resizing. 819 * 820 * Will trigger an automatic deferred table resizing if the size grows 821 * beyond the watermark indicated by grow_decision() which can be passed 822 * to rhashtable_init(). 823 */ 824bool rhashtable_lookup_compare_insert(struct rhashtable *ht, 825 struct rhash_head *obj, 826 bool (*compare)(void *, void *), 827 void *arg) 828{ 829 struct bucket_table *new_tbl, *old_tbl; 830 u32 new_hash; 831 bool success = true; 832 833 BUG_ON(!ht->p.key_len); 834 835 rcu_read_lock(); 836 old_tbl = rht_dereference_rcu(ht->tbl, ht); 837 new_tbl = rht_dereference_rcu(ht->future_tbl, ht); 838 new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); 839 840 lock_buckets(new_tbl, old_tbl, new_hash); 841 842 if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset, 843 compare, arg)) { 844 success = false; 845 goto exit; 846 } 847 848 __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash); 849 | |
850exit: | 459exit: |
851 unlock_buckets(new_tbl, old_tbl, new_hash); 852 rcu_read_unlock(); | 460 spin_unlock(rht_bucket_lock(tbl, hash)); |
853 | 461 |
854 return success; | 462 return err; |
855} | 463} |
856EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert); | 464EXPORT_SYMBOL_GPL(rhashtable_insert_slow); |
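
rhashtable_insert_slow() and rhashtable_insert_rehash() above back the inline fast-path helpers in include/linux/rhashtable.h (rhashtable_insert_fast(), rhashtable_lookup_fast(), rhashtable_remove_fast()). A hedged usage sketch of that public API follows; struct demo_obj, demo_params, demo() and the key value are made up for illustration, and error handling is reduced to the essentials:

```c
/* Hedged usage sketch of the public API that the slow path above backs. */
#include <linux/kernel.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params demo_params = {
	.head_offset		= offsetof(struct demo_obj, node),
	.key_offset		= offsetof(struct demo_obj, key),
	.key_len		= sizeof(u32),
	.automatic_shrinking	= true,
};

static int demo(struct rhashtable *ht)
{
	struct demo_obj *obj, *found;
	u32 key = 42;
	int err;

	err = rhashtable_init(ht, &demo_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out;
	}
	obj->key = key;

	/* May fail with -ENOMEM or -EBUSY if a forced rehash cannot keep up. */
	err = rhashtable_insert_fast(ht, &obj->node, demo_params);
	if (err) {
		kfree(obj);
		goto out;
	}

	rcu_read_lock();
	found = rhashtable_lookup_fast(ht, &key, demo_params);
	rcu_read_unlock();
	pr_info("lookup %s\n", found == obj ? "hit" : "miss");

	rhashtable_remove_fast(ht, &obj->node, demo_params);
	/* Real code would defer the kfree() past an RCU grace period
	 * (e.g. kfree_rcu()); this sketch has no concurrent readers.
	 */
	kfree(obj);
out:
	rhashtable_destroy(ht);
	return err;
}
```
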
857 858/** 859 * rhashtable_walk_init - Initialise an iterator 860 * @ht: Table to walk over 861 * @iter: Hash table Iterator 862 * 863 * This function prepares a hash table walk. 864 * --- 17 unchanged lines hidden (view full) --- 882 iter->p = NULL; 883 iter->slot = 0; 884 iter->skip = 0; 885 886 iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); 887 if (!iter->walker) 888 return -ENOMEM; 889 | 465 466/** 467 * rhashtable_walk_init - Initialise an iterator 468 * @ht: Table to walk over 469 * @iter: Hash table Iterator 470 * 471 * This function prepares a hash table walk. 472 * --- 17 unchanged lines hidden (view full) --- 490 iter->p = NULL; 491 iter->slot = 0; 492 iter->skip = 0; 493 494 iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); 495 if (!iter->walker) 496 return -ENOMEM; 497 |
890 INIT_LIST_HEAD(&iter->walker->list); 891 iter->walker->resize = false; 892 | |
893 mutex_lock(&ht->mutex); | 498 mutex_lock(&ht->mutex); |
894 list_add(&iter->walker->list, &ht->walkers); | 499 iter->walker->tbl = rht_dereference(ht->tbl, ht); 500 list_add(&iter->walker->list, &iter->walker->tbl->walkers); |
895 mutex_unlock(&ht->mutex); 896 897 return 0; 898} 899EXPORT_SYMBOL_GPL(rhashtable_walk_init); 900 901/** 902 * rhashtable_walk_exit - Free an iterator 903 * @iter: Hash table Iterator 904 * 905 * This function frees resources allocated by rhashtable_walk_init. 906 */ 907void rhashtable_walk_exit(struct rhashtable_iter *iter) 908{ 909 mutex_lock(&iter->ht->mutex); | 501 mutex_unlock(&ht->mutex); 502 503 return 0; 504} 505EXPORT_SYMBOL_GPL(rhashtable_walk_init); 506 507/** 508 * rhashtable_walk_exit - Free an iterator 509 * @iter: Hash table Iterator 510 * 511 * This function frees resources allocated by rhashtable_walk_init. 512 */ 513void rhashtable_walk_exit(struct rhashtable_iter *iter) 514{ 515 mutex_lock(&iter->ht->mutex); |
910 list_del(&iter->walker->list); | 516 if (iter->walker->tbl) 517 list_del(&iter->walker->list); |
911 mutex_unlock(&iter->ht->mutex); 912 kfree(iter->walker); 913} 914EXPORT_SYMBOL_GPL(rhashtable_walk_exit); 915 916/** 917 * rhashtable_walk_start - Start a hash table walk 918 * @iter: Hash table iterator --- 4 unchanged lines hidden (view full) --- 923 * 924 * Returns zero if successful. 925 * 926 * Returns -EAGAIN if resize event occured. Note that the iterator 927 * will rewind back to the beginning and you may use it immediately 928 * by calling rhashtable_walk_next. 929 */ 930int rhashtable_walk_start(struct rhashtable_iter *iter) | 518 mutex_unlock(&iter->ht->mutex); 519 kfree(iter->walker); 520} 521EXPORT_SYMBOL_GPL(rhashtable_walk_exit); 522 523/** 524 * rhashtable_walk_start - Start a hash table walk 525 * @iter: Hash table iterator --- 4 unchanged lines hidden (view full) --- 530 * 531 * Returns zero if successful. 532 * 533 * Returns -EAGAIN if resize event occured. Note that the iterator 534 * will rewind back to the beginning and you may use it immediately 535 * by calling rhashtable_walk_next. 536 */ 537int rhashtable_walk_start(struct rhashtable_iter *iter) |
| 538 __acquires(RCU) |
931{ | 539{ |
| 540 struct rhashtable *ht = iter->ht; 541 542 mutex_lock(&ht->mutex); 543 544 if (iter->walker->tbl) 545 list_del(&iter->walker->list); 546 |
932 rcu_read_lock(); 933 | 547 rcu_read_lock(); 548 |
934 if (iter->walker->resize) { 935 iter->slot = 0; 936 iter->skip = 0; 937 iter->walker->resize = false; | 549 mutex_unlock(&ht->mutex); 550 551 if (!iter->walker->tbl) { 552 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); |
938 return -EAGAIN; 939 } 940 941 return 0; 942} 943EXPORT_SYMBOL_GPL(rhashtable_walk_start); 944 945/** --- 5 unchanged lines hidden (view full) --- 951 * 952 * Returns the next object or NULL when the end of the table is reached. 953 * 954 * Returns -EAGAIN if resize event occured. Note that the iterator 955 * will rewind back to the beginning and you may continue to use it. 956 */ 957void *rhashtable_walk_next(struct rhashtable_iter *iter) 958{ | 553 return -EAGAIN; 554 } 555 556 return 0; 557} 558EXPORT_SYMBOL_GPL(rhashtable_walk_start); 559 560/** --- 5 unchanged lines hidden (view full) --- 566 * 567 * Returns the next object or NULL when the end of the table is reached. 568 * 569 * Returns -EAGAIN if resize event occured. Note that the iterator 570 * will rewind back to the beginning and you may continue to use it. 571 */ 572void *rhashtable_walk_next(struct rhashtable_iter *iter) 573{ |
959 const struct bucket_table *tbl; | 574 struct bucket_table *tbl = iter->walker->tbl; |
960 struct rhashtable *ht = iter->ht; 961 struct rhash_head *p = iter->p; 962 void *obj = NULL; 963 | 575 struct rhashtable *ht = iter->ht; 576 struct rhash_head *p = iter->p; 577 void *obj = NULL; 578 |
964 tbl = rht_dereference_rcu(ht->tbl, ht); 965 | |
966 if (p) { 967 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); 968 goto next; 969 } 970 971 for (; iter->slot < tbl->size; iter->slot++) { 972 int skip = iter->skip; 973 --- 9 unchanged lines hidden (view full) --- 983 iter->p = p; 984 obj = rht_obj(ht, p); 985 goto out; 986 } 987 988 iter->skip = 0; 989 } 990 | 579 if (p) { 580 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); 581 goto next; 582 } 583 584 for (; iter->slot < tbl->size; iter->slot++) { 585 int skip = iter->skip; 586 --- 9 unchanged lines hidden (view full) --- 596 iter->p = p; 597 obj = rht_obj(ht, p); 598 goto out; 599 } 600 601 iter->skip = 0; 602 } 603 |
991 iter->p = NULL; | 604 /* Ensure we see any new tables. */ 605 smp_rmb(); |
992 | 606 |
993out: 994 if (iter->walker->resize) { 995 iter->p = NULL; | 607 iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); 608 if (iter->walker->tbl) { |
996 iter->slot = 0; 997 iter->skip = 0; | 609 iter->slot = 0; 610 iter->skip = 0; |
998 iter->walker->resize = false; | |
999 return ERR_PTR(-EAGAIN); 1000 } 1001 | 611 return ERR_PTR(-EAGAIN); 612 } 613 |
| 614 iter->p = NULL; 615 616out: 617 |
1002 return obj; 1003} 1004EXPORT_SYMBOL_GPL(rhashtable_walk_next); 1005 1006/** 1007 * rhashtable_walk_stop - Finish a hash table walk 1008 * @iter: Hash table iterator 1009 * 1010 * Finish a hash table walk. 1011 */ 1012void rhashtable_walk_stop(struct rhashtable_iter *iter) | 618 return obj; 619} 620EXPORT_SYMBOL_GPL(rhashtable_walk_next); 621 622/** 623 * rhashtable_walk_stop - Finish a hash table walk 624 * @iter: Hash table iterator 625 * 626 * Finish a hash table walk. 627 */ 628void rhashtable_walk_stop(struct rhashtable_iter *iter) |
| 629 __releases(RCU) |
1013{ | 630{ |
1014 rcu_read_unlock(); | 631 struct rhashtable *ht; 632 struct bucket_table *tbl = iter->walker->tbl; 633 634 if (!tbl) 635 goto out; 636 637 ht = iter->ht; 638 639 spin_lock(&ht->lock); 640 if (tbl->rehash < tbl->size) 641 list_add(&iter->walker->list, &tbl->walkers); 642 else 643 iter->walker->tbl = NULL; 644 spin_unlock(&ht->lock); 645 |
1015 iter->p = NULL; | 646 iter->p = NULL; |
| 647 648out: 649 rcu_read_unlock(); |
1016} 1017EXPORT_SYMBOL_GPL(rhashtable_walk_stop); 1018 | 650} 651EXPORT_SYMBOL_GPL(rhashtable_walk_stop); 652 |
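
The walker functions above are driven as in the sketch below, which reuses the hypothetical demo_obj type from the previous example. Signatures follow this version of rhashtable.h as far as I can tell (rhashtable_walk_init() without a gfp argument, int-returning rhashtable_walk_start()); a -EAGAIN from either start or next just means the table was resized and the iterator rewound:

```c
/* Hedged sketch of a full walk; duplicates and misses are possible if
 * the table is resized during the walk, as with any unsynchronized
 * iteration over a resizable table.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rhashtable.h>

static void demo_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct demo_obj *obj;	/* element type from the previous sketch */
	int err;

	err = rhashtable_walk_init(ht, &iter);
	if (err)
		return;

	err = rhashtable_walk_start(&iter);
	if (err == -EAGAIN)
		pr_info("table resized before the walk started\n");

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resized; walk rewound */
			break;
		}
		pr_info("saw key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
```
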
1019static size_t rounded_hashtable_size(struct rhashtable_params *params) | 653static size_t rounded_hashtable_size(const struct rhashtable_params *params) |
1020{ 1021 return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), | 654{ 655 return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), |
1022 1UL << params->min_shift); | 656 (unsigned long)params->min_size); |
1023} 1024 | 657} 658 |
| 659static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) 660{ 661 return jhash2(key, length, seed); 662} 663 |
1025/** 1026 * rhashtable_init - initialize a new hash table 1027 * @ht: hash table to be initialized 1028 * @params: configuration parameters 1029 * 1030 * Initializes a new hash table based on the provided configuration 1031 * parameters. A table can be configured either with a variable or 1032 * fixed length key: --- 14 unchanged lines hidden (view full) --- 1047 * }; 1048 * 1049 * Configuration Example 2: Variable length keys 1050 * struct test_obj { 1051 * [...] 1052 * struct rhash_head node; 1053 * }; 1054 * | 664/** 665 * rhashtable_init - initialize a new hash table 666 * @ht: hash table to be initialized 667 * @params: configuration parameters 668 * 669 * Initializes a new hash table based on the provided configuration 670 * parameters. A table can be configured either with a variable or 671 * fixed length key: --- 14 unchanged lines hidden (view full) --- 686 * }; 687 * 688 * Configuration Example 2: Variable length keys 689 * struct test_obj { 690 * [...] 691 * struct rhash_head node; 692 * }; 693 * |
1055 * u32 my_hash_fn(const void *data, u32 seed) | 694 * u32 my_hash_fn(const void *data, u32 len, u32 seed) |
1056 * { 1057 * struct test_obj *obj = data; 1058 * 1059 * return [... hash ...]; 1060 * } 1061 * 1062 * struct rhashtable_params params = { 1063 * .head_offset = offsetof(struct test_obj, node), 1064 * .hashfn = jhash, 1065 * .obj_hashfn = my_hash_fn, 1066 * }; 1067 */ | 695 * { 696 * struct test_obj *obj = data; 697 * 698 * return [... hash ...]; 699 * } 700 * 701 * struct rhashtable_params params = { 702 * .head_offset = offsetof(struct test_obj, node), 703 * .hashfn = jhash, 704 * .obj_hashfn = my_hash_fn, 705 * }; 706 */ |
1068int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) | 707int rhashtable_init(struct rhashtable *ht, 708 const struct rhashtable_params *params) |
1069{ 1070 struct bucket_table *tbl; 1071 size_t size; 1072 1073 size = HASH_DEFAULT_SIZE; 1074 | 709{ 710 struct bucket_table *tbl; 711 size_t size; 712 713 size = HASH_DEFAULT_SIZE; 714 |
1075 if ((params->key_len && !params->hashfn) || 1076 (!params->key_len && !params->obj_hashfn)) | 715 if ((!params->key_len && !params->obj_hashfn) || 716 (params->obj_hashfn && !params->obj_cmpfn)) |
1077 return -EINVAL; 1078 1079 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) 1080 return -EINVAL; 1081 | 717 return -EINVAL; 718 719 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) 720 return -EINVAL; 721 |
1082 params->min_shift = max_t(size_t, params->min_shift, 1083 ilog2(HASH_MIN_SIZE)); 1084 | |
1085 if (params->nelem_hint) 1086 size = rounded_hashtable_size(params); 1087 1088 memset(ht, 0, sizeof(*ht)); 1089 mutex_init(&ht->mutex); | 722 if (params->nelem_hint) 723 size = rounded_hashtable_size(params); 724 725 memset(ht, 0, sizeof(*ht)); 726 mutex_init(&ht->mutex); |
| 727 spin_lock_init(&ht->lock); |
1090 memcpy(&ht->p, params, sizeof(*params)); | 728 memcpy(&ht->p, params, sizeof(*params)); |
1091 INIT_LIST_HEAD(&ht->walkers); | |
1092 | 729 |
| 730 if (params->min_size) 731 ht->p.min_size = roundup_pow_of_two(params->min_size); 732 733 if (params->max_size) 734 ht->p.max_size = rounddown_pow_of_two(params->max_size); 735 736 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 737 738 /* The maximum (not average) chain length grows with the 739 * size of the hash table, at a rate of (log N)/(log log N). 740 * The value of 16 is selected so that even if the hash 741 * table grew to 2^32 you would not expect the maximum 742 * chain length to exceed it unless we are under attack 743 * (or extremely unlucky). 744 * 745 * As this limit is only to detect attacks, we don't need 746 * to set it to a lower value as you'd need the chain 747 * length to vastly exceed 16 to have any real effect 748 * on the system. 749 */ 750 if (!params->insecure_elasticity) 751 ht->elasticity = 16; 752 |
1093 if (params->locks_mul) 1094 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); 1095 else 1096 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; 1097 | 753 if (params->locks_mul) 754 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); 755 else 756 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; 757 |
1098 tbl = bucket_table_alloc(ht, size); | 758 ht->key_len = ht->p.key_len; 759 if (!params->hashfn) { 760 ht->p.hashfn = jhash; 761 762 if (!(ht->key_len & (sizeof(u32) - 1))) { 763 ht->key_len /= sizeof(u32); 764 ht->p.hashfn = rhashtable_jhash2; 765 } 766 } 767 768 tbl = bucket_table_alloc(ht, size, GFP_KERNEL); |
1099 if (tbl == NULL) 1100 return -ENOMEM; 1101 1102 atomic_set(&ht->nelems, 0); | 769 if (tbl == NULL) 770 return -ENOMEM; 771 772 atomic_set(&ht->nelems, 0); |
1103 atomic_set(&ht->shift, ilog2(tbl->size)); | 773 |
1104 RCU_INIT_POINTER(ht->tbl, tbl); | 774 RCU_INIT_POINTER(ht->tbl, tbl); |
1105 RCU_INIT_POINTER(ht->future_tbl, tbl); | |
1106 | 775 |
1107 if (!ht->p.hash_rnd) 1108 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); 1109 | |
1110 INIT_WORK(&ht->run_work, rht_deferred_worker); 1111 1112 return 0; 1113} 1114EXPORT_SYMBOL_GPL(rhashtable_init); 1115 1116/** | 776 INIT_WORK(&ht->run_work, rht_deferred_worker); 777 778 return 0; 779} 780EXPORT_SYMBOL_GPL(rhashtable_init); 781 782/** |
1117 * rhashtable_destroy - destroy hash table | 783 * rhashtable_free_and_destroy - free elements and destroy hash table |
1118 * @ht: the hash table to destroy | 784 * @ht: the hash table to destroy |
| 785 * @free_fn: callback to release resources of element 786 * @arg: pointer passed to free_fn |
1119 * | 787 * |
1120 * Frees the bucket array. This function is not rcu safe, therefore the caller 1121 * has to make sure that no resizing may happen by unpublishing the hashtable 1122 * and waiting for the quiescent cycle before releasing the bucket array. | 788 * Stops an eventual async resize. If defined, invokes free_fn for each 789 * element to release resources. Please note that RCU protected 790 * readers may still be accessing the elements. Releasing of resources 791 * must occur in a compatible manner. Then frees the bucket array. 792 * 793 * This function will eventually sleep to wait for an async resize 794 * to complete. The caller is responsible that no further write operations 795 * occur in parallel. |
1123 */ | 796 */ |
1124void rhashtable_destroy(struct rhashtable *ht) | 797void rhashtable_free_and_destroy(struct rhashtable *ht, 798 void (*free_fn)(void *ptr, void *arg), 799 void *arg) |
1125{ | 800{ |
1126 ht->being_destroyed = true; | 801 const struct bucket_table *tbl; 802 unsigned int i; |
1127 1128 cancel_work_sync(&ht->run_work); 1129 1130 mutex_lock(&ht->mutex); | 803 804 cancel_work_sync(&ht->run_work); 805 806 mutex_lock(&ht->mutex); |
1131 bucket_table_free(rht_dereference(ht->tbl, ht)); | 807 tbl = rht_dereference(ht->tbl, ht); 808 if (free_fn) { 809 for (i = 0; i < tbl->size; i++) { 810 struct rhash_head *pos, *next; 811 812 for (pos = rht_dereference(tbl->buckets[i], ht), 813 next = !rht_is_a_nulls(pos) ? 814 rht_dereference(pos->next, ht) : NULL; 815 !rht_is_a_nulls(pos); 816 pos = next, 817 next = !rht_is_a_nulls(pos) ? 818 rht_dereference(pos->next, ht) : NULL) 819 free_fn(rht_obj(ht, pos), arg); 820 } 821 } 822 823 bucket_table_free(tbl); |
1132 mutex_unlock(&ht->mutex); 1133} | 824 mutex_unlock(&ht->mutex); 825} |
| 826EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); 827 828void rhashtable_destroy(struct rhashtable *ht) 829{ 830 return rhashtable_free_and_destroy(ht, NULL, NULL); 831} |
1134EXPORT_SYMBOL_GPL(rhashtable_destroy); | 832EXPORT_SYMBOL_GPL(rhashtable_destroy); |
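
Finally, a short hedged sketch of tearing down a table with the new rhashtable_free_and_destroy(): it cancels any pending deferred resize and hands every remaining element to the callback, which here simply kfree()s it. The element type and function names are again made up for illustration:

```c
/* Hedged teardown sketch for a table whose elements were kmalloc()ed. */
#include <linux/rhashtable.h>
#include <linux/slab.h>

static void demo_free_element(void *ptr, void *arg)
{
	kfree(ptr);	/* ptr is the element, e.g. a struct demo_obj */
}

static void demo_teardown(struct rhashtable *ht)
{
	rhashtable_free_and_destroy(ht, demo_free_element, NULL);
}
```
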