/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>

#include <linux/rhashtable-types.h>
/*
 * Objects in an rhashtable have an embedded struct rhash_head
 * which is linked into a hash chain from the hash table - or one
 * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set but otherwise stores the address of
 * the hash bucket.  This allows us to be sure we've found the end
 * of the right list.
 * The value stored in the hash bucket has BIT(0) used as a lock bit.
 * This bit must be atomically set before any changes are made to
 * the chain.  To avoid dereferencing this pointer without clearing
 * the bit first, we use an opaque 'struct rhash_lock_head *' for the
 * pointer stored in the bucket.  This struct needs to be defined so
 * that rcu_dereference() works on it, but it has no content so a
 * cast is needed for it to be useful.  This ensures it isn't
 * used by mistake without first clearing the lock bit.
 */
struct rhash_lock_head {};

/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @hash_rnd: Random seed to fold into hash
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @dep_map: Lockdep map for the per-bucket bit spinlocks
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	u32			hash_rnd;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct lockdep_map	dep_map;

	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
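
/*
 * Example (an illustrative sketch only; "struct test_obj" and
 * "test_params" are hypothetical names, not part of this API):
 * an object embeds a struct rhash_head and describes its layout
 * through struct rhashtable_params, usually as compile-time
 * constants so the hash function can be selected at build time.
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset  = offsetof(struct test_obj, key),
 *		.key_len     = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	struct rhashtable ht;
 *
 *	err = rhashtable_init(&ht, &test_params);
 */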

/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the msb.
 * We give it an address, in which the bottom bit is
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 *
 * We never store the NULLS_MARKER in the hash table
 * itself as we need the lsb for locking.
 * Instead we store a NULL.
 */
#define RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = NULL)

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
						       ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
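
/*
 * Because @params is passed by value and is normally a compile-time
 * constant, the branches in rht_key_get_hash() fold away.  A sketch
 * (the key lengths below are hypothetical, shown for illustration):
 *
 *	.key_len = sizeof(u32) * 4	// 16 bytes, u32-aligned:
 *					// compiles down to jhash2()
 *	.key_len = 7			// odd length:
 *					// compiles down to jhash()
 *	.hashfn  = my_hashfn		// explicit hashfn (hypothetical)
 *					// always takes precedence when set
 */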

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
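
/*
 * Typical use of the walk API declared above (a sketch; "ht", "iter"
 * and "obj" are local names, and "process()" is a hypothetical helper.
 * The outer loop retries when a concurrent resize forces the walker
 * to restart with -EAGAIN, in which case objects may be seen twice):
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	do {
 *		rhashtable_walk_start(&iter);
 *		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
 *			process(obj);
 *		rhashtable_walk_stop(&iter);
 *	} while (obj == ERR_PTR(-EAGAIN));
 *	rhashtable_walk_exit(&iter);
 */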

struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
						 unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
						   unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
							struct bucket_table *tbl,
							unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_lock_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/*
 * We lock a bucket by setting BIT(0) in the pointer - this is always
 * zero in real pointers.  The NULLS mark is never stored in the bucket,
 * rather we store NULL if the bucket is empty.
 * bit_spin_locks do not handle contention well, but the whole point
 * of the hashtable design is to achieve minimum per-bucket contention.
 * A nested hash table might not have a bucket pointer.  In that case
 * we cannot get a lock.  For remove and replace the bucket cannot be
 * interesting (an object cannot be found in a bucket that was never
 * allocated), so it doesn't need locking.
 * For insert we allocate the bucket if this is the last bucket_table,
 * and then take the lock.
 * Sometimes we unlock a bucket by writing a new pointer there.  In that
 * case we don't need to unlock, but we do need to reset state such as
 * local_bh.  For that we have rht_assign_unlock().  As rcu_assign_pointer()
 * provides the same release semantics that bit_spin_unlock() provides,
 * this is safe.
 * When we write to a bucket without unlocking, we use rht_assign_locked().
 */

static inline void rht_lock(struct bucket_table *tbl,
			    struct rhash_lock_head **bkt)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
}

static inline void rht_lock_nested(struct bucket_table *tbl,
				   struct rhash_lock_head **bucket,
				   unsigned int subclass)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bucket);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head **bkt)
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_bh_enable();
}
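
/*
 * Internal mutation paths follow this pattern (a sketch of the helpers
 * above, not a public API; "tbl", "bkt" and "obj" are local names):
 *
 *	bkt = rht_bucket_insert(ht, tbl, hash);
 *	rht_lock(tbl, bkt);		  // sets BIT(0), disables BH
 *	...				  // walk or modify the chain
 *	rht_assign_unlock(tbl, bkt, obj); // new head, implicit unlock
 *	// or, when the head pointer is unchanged:
 *	rht_unlock(tbl, bkt);
 */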

/*
 * Where 'bkt' is a bucket and might be locked:
 *   rht_ptr() dereferences that pointer and clears the lock bit.
 *   rht_ptr_exclusive() dereferences in a context where exclusive
 *            access is guaranteed, such as when destroying the table.
 */
static inline struct rhash_head *rht_ptr(
	struct rhash_lock_head __rcu * const *bkt,
	struct bucket_table *tbl,
	unsigned int hash)
{
	const struct rhash_lock_head *p =
		rht_dereference_bucket_rcu(*bkt, tbl, hash);

	if ((((unsigned long)p) & ~BIT(0)) == 0)
		return RHT_NULLS_MARKER(bkt);
	return (void *)(((unsigned long)p) & ~BIT(0));
}

static inline struct rhash_head *rht_ptr_exclusive(
	struct rhash_lock_head __rcu * const *bkt)
{
	const struct rhash_lock_head *p =
		rcu_dereference_protected(*bkt, 1);

	if (!p)
		return RHT_NULLS_MARKER(bkt);
	return (void *)(((unsigned long)p) & ~BIT(0));
}

static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;

	if (rht_is_a_nulls(obj))
		obj = NULL;
	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
}

static inline void rht_assign_unlock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;

	if (rht_is_a_nulls(obj))
		obj = NULL;
	lock_map_release(&tbl->dep_map);
	rcu_assign_pointer(*p, obj);
	preempt_enable();
	__release(bitlock);
	local_bh_enable();
}

/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
			  tbl, hash)

/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)	\
	for (pos = head;						\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_from(tpos, pos,				\
				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
				tbl, hash, member)
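
/*
 * Sketch of a locked bucket scan with the macro above (internal use;
 * assumes the caller holds the bucket lock or the table mutex, and
 * "struct test_obj" with member "node" is the hypothetical type from
 * the example near the top of this file):
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rht_for_each_entry(obj, pos, tbl, hash, node)
 *		pr_info("key=%d\n", obj->key);
 */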

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),		      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = head;						\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)			\
	for (({barrier(); }),					\
	     pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash);	\
	     !rht_is_a_nulls(pos);				\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the &struct rhash_head to start from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						 \
	     pos = head;						 \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	 \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
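
/*
 * Sketch of an RCU bucket scan with rht_for_each_rcu() above
 * (internal use; the traversal must be covered by rcu_read_lock()):
 *
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_rcu(pos, tbl, hash)
 *		pr_info("obj=%p\n", rht_obj(ht, pos));
 *	rcu_read_unlock();
 */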

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_from(tpos, pos,				   \
				    rht_ptr(rht_bucket(tbl, hash),	   \
					    tbl, hash),			   \
				    tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhlist_head to use as a loop cursor.
 * @list: the head of the list
 * @member: name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu * const *bkt;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	bkt = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(bkt));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}
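
/*
 * Sketch of a lookup (same hypothetical "struct test_obj" and
 * "test_params" as in the example near the top of this file):
 *
 *	struct test_obj *obj;
 *	int key = 1;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key, test_params);
 *	if (obj)
 *		pr_info("found key=%d\n", obj->key);
 *	rcu_read_unlock();
 */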

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

/**
 * rhltable_lookup - search hash list table
 * @hlt: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
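
/*
 * Sketch of iterating over all duplicates returned by rhltable_lookup()
 * (hypothetical object type embedding a struct rhlist_head named
 * "list_node"; must run under rcu_read_lock()):
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_dup_obj *obj;	// hypothetical type
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		pr_info("duplicate %p\n", obj);
 *	rcu_read_unlock();
 */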

/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	elasticity = RHT_ELASTICITY;
	bkt = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!bkt)
		goto out;
	pprev = NULL;
	rht_lock(tbl, bkt);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		rht_unlock(tbl, bkt);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out_unlock;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else
			rht_assign_unlock(tbl, bkt, obj);
		data = NULL;
		goto out;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out_unlock;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	/* Inserting at head of list makes unlocking free. */
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	atomic_inc(&ht->nelems);
	rht_assign_unlock(tbl, bkt, obj);

	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	data = NULL;
out:
	rcu_read_unlock();

	return data;

out_unlock:
	rht_unlock(tbl, bkt);
	goto out;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt: hash list table
 * @key: the pointer to the key
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}
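
/*
 * Sketch of an insertion (same hypothetical "struct test_obj" and
 * "test_params"; safe in atomic context):
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 1;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)	// -EEXIST, -E2BIG, -ENOMEM, ...
 *		kfree(obj);
 */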

/**
 * rhltable_insert - insert object into hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Will take the per bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
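
/*
 * Sketch of the insert-or-get idiom with the function above (same
 * hypothetical "struct test_obj"; "old" points at the pre-existing
 * entry when the key is already present):
 *
 *	struct test_obj *old;
 *
 *	old = rhashtable_lookup_get_insert_fast(&ht, &obj->node,
 *						test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// insertion failed
 *	if (old)
 *		kfree(obj);		// lost the race, reuse "old"
 */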

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;
	pprev = NULL;
	rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else {
			rht_assign_unlock(tbl, bkt, obj);
		}
		goto unlocked;
	}

	rht_unlock(tbl, bkt);
unlocked:
	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}

/**
 * rhltable_remove - remove object from hash list table
 * @hlt: hash list table
 * @list: pointer to hash list head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}
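
/*
 * Sketch of a removal with RCU-deferred freeing (same hypothetical
 * "struct test_obj", here assumed to also embed a struct rcu_head
 * named "rcu"):
 *
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);	// readers may still hold obj
 */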

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;

	pprev = NULL;
	rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj_new);
			rht_unlock(tbl, bkt);
		} else {
			rht_assign_unlock(tbl, bkt, obj_new);
		}
		err = 0;
		goto unlocked;
	}

	rht_unlock(tbl, bkt);

unlocked:
	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht: hash table
 * @obj_old: pointer to hash head inside object being replaced
 * @obj_new: pointer to hash head inside object which is new
 * @params: hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}
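
/*
 * Sketch of replacing an entry in place (hypothetical "new_obj" must
 * carry the same key as "old_obj" so both hash to the same bucket):
 *
 *	new_obj->key = old_obj->key;
 *	err = rhashtable_replace_fast(&ht, &old_obj->node,
 *				      &new_obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(old_obj, rcu);
 */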

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt: the hash list table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */