/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

struct rhash_head {
	struct rhash_head __rcu		*next;
};

struct rhlist_head {
	struct rhash_head		rhead;
	struct rhlist_head __rcu	*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		insecure_max_entries;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
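/*
 * Illustrative sketch only (test_obj, my_objs and test_params are
 * hypothetical names, not part of this API): a typical user embeds a
 * struct rhash_head in its own object and describes the key layout with
 * offsetof() in a constant struct rhashtable_params before calling
 * rhashtable_init():
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len		= sizeof(int),
 *		.key_offset		= offsetof(struct test_obj, key),
 *		.head_offset		= offsetof(struct test_obj, node),
 *		.automatic_shrinking	= true,
 *	};
 *
 *	struct rhashtable my_objs;
 *	int err;
 *
 *	err = rhashtable_init(&my_objs, &test_params);
 *
 * Keeping the params constant lets the inline fast paths below specialise
 * the hash function choice at compile time.
 */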
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhltable - Hash table with duplicate objects in a list
 * @ht: Underlying rhashtable
 */
struct rhltable {
	struct rhashtable ht;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator
 * @ht: Table to iterate through
 * @p: Current pointer
 * @list: Current hash list pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhlist_head *list;
	struct rhashtable_walker walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables during
 * a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
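/*
 * Illustrative sketch only of the lock-ordering rule above (not a helper in
 * this header; the exact locking sequence lives in lib/rhashtable.c): a
 * writer that must hold both tables' bucket locks takes the old table's
 * lock first and nests the new table's lock inside it:
 *
 *	old_lock = rht_bucket_lock(old_tbl, old_hash);
 *	spin_lock_bh(old_lock);
 *	new_lock = rht_bucket_lock(new_tbl, new_hash);
 *	spin_lock_nested(new_lock, SINGLE_DEPTH_NESTING);
 *	... relink entries from the old bucket into the new bucket ...
 *	spin_unlock(new_lock);
 *	spin_unlock_bh(old_lock);
 *
 * Reversing the order could deadlock against a concurrent writer that
 * follows the documented old-then-new order.
 */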
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
		  const struct rhashtable_params *params);

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member)	\
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash),   \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)
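/*
 * Illustrative sketch only (test_obj, my_objs, test_params and the bucket
 * computation are hypothetical, reusing the earlier example): walking one
 * bucket of the current table under RCU with rht_for_each_entry_rcu().
 * Most users want rhashtable_lookup() below instead; open-coded bucket
 * walks are mainly useful for diagnostics.
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *	struct bucket_table *tbl;
 *	unsigned int hash;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(my_objs.tbl, &my_objs);
 *	hash = rht_key_hashfn(&my_objs, tbl, &key, test_params);
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %d\n", obj->key);
 *	rcu_read_unlock();
 */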
/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
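/*
 * Illustrative sketch only (test_obj, my_objs and test_params are the
 * hypothetical names from the earlier example): a typical lookup holds the
 * RCU read lock around rhashtable_lookup() and only uses the returned
 * object inside the critical section:
 *
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_objs, &key, test_params);
 *	if (obj)
 *		pr_info("found %d\n", obj->key);
 *	rcu_read_unlock();
 *
 * rhashtable_lookup_fast() wraps the same call in rcu_read_lock()/unlock()
 * for callers that pin the object's lifetime by other means.
 */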
/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
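/*
 * Illustrative sketch only (test_obj, my_list_ht, test_params and the
 * "list_node" member are hypothetical): with an rhltable the object embeds
 * a struct rhlist_head, and all duplicates for a key are visited with
 * rhl_for_each_entry_rcu() on the list returned by rhltable_lookup():
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhlist_head	list_node;
 *	};
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&my_list_ht, &key, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		pr_info("duplicate with key %d\n", obj->key);
 *	rcu_read_unlock();
 */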
/* Internal function, please use rhashtable_insert_fast() instead.  This
 * function returns the existing element already in the hash table if there
 * is a clash, NULL on successful insertion, and an error via ERR_PTR()
 * otherwise.
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = ht->elasticity;
	pprev = &tbl->buckets[hash];
	rht_for_each(head, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark of 75% table residency.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
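/*
 * Illustrative sketch only (test_obj, my_objs and test_params are the
 * hypothetical names from the earlier example): allocating an object and
 * inserting it.  Note that this variant passes a NULL key internally and
 * therefore does not search for an existing entry with the same key; use
 * rhashtable_lookup_insert_fast() below when duplicates must be rejected.
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *
 *	err = rhashtable_insert_fast(&my_objs, &obj->node, test_params);
 *	if (err) {
 *		kfree(obj);	// e.g. -E2BIG or -ENOMEM from a resize
 *		return err;
 *	}
 */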
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark of 75% table residency.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark of 75% table residency.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}
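/*
 * Illustrative sketch only (test_obj, my_list_ht, test_params and the
 * "list_node" member are hypothetical, matching the rhltable lookup sketch
 * above; error handling elided): because an rhltable keeps duplicates,
 * inserting two objects with the same key succeeds and both are later
 * returned by rhltable_lookup():
 *
 *	struct test_obj *a = kzalloc(sizeof(*a), GFP_KERNEL);
 *	struct test_obj *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *	int err;
 *
 *	a->key = 42;
 *	b->key = 42;
 *	err = rhltable_insert(&my_list_ht, &a->list_node, test_params);
 *	if (!err)
 *		err = rhltable_insert(&my_list_ht, &b->list_node, test_params);
 */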
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark of 75% table residency.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark of 75% table residency.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}
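/*
 * Illustrative sketch only (test_obj, my_objs and test_params are the
 * hypothetical names from the earlier example; this also assumes the table
 * was initialised with an obj_hashfn, which rhashtable_lookup_get_insert_key()
 * requires): the three-way return value distinguishes "inserted", "already
 * present" and "error":
 *
 *	struct test_obj *old;
 *
 *	old = rhashtable_lookup_get_insert_key(&my_objs, &obj->key,
 *					       &obj->node, test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// insertion failed
 *	if (old) {
 *		kfree(obj);		// an entry with this key already exists
 *		obj = old;
 *	}
 *	// otherwise obj was inserted
 */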
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if the automatic_shrinking parameter
 * was set and table residency drops below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
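/*
 * Illustrative sketch only (test_obj, my_objs, test_params and the "rcu"
 * member are hypothetical): because concurrent RCU readers may still hold
 * references to a removed object, it is typically freed only after a grace
 * period, e.g. via kfree_rcu():
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *		struct rcu_head		rcu;
 *	};
 *
 *	err = rhashtable_remove_fast(&my_objs, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);
 */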
/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if the automatic_shrinking parameter
 * was set and table residency drops below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}
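/*
 * Illustrative sketch only (test_obj and my_objs are hypothetical): a full
 * table walk pairs rhashtable_walk_enter()/exit() around one or more
 * rhashtable_walk_start()/stop() sections and handles -EAGAIN, which the
 * walker returns when a resize interferes with the iteration:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_objs, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */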
/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */