/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};
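
/*
 * Nested bucket tables form a small radix tree of page-sized nodes, each
 * holding 1 << (PAGE_SHIFT - ilog2(sizeof(void *))) pointers, e.g. 512
 * slots on a 64-bit system with 4KiB pages.  They are only used when
 * bucket_table_alloc() cannot get a flat bucket array, e.g. for the
 * GFP_ATOMIC allocation done by an insert-time rehash.
 */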

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
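
/*
 * Move one entry from the old bucket chain identified by @old_hash into the
 * table being rehashed into.  Entries are taken from the tail of the old
 * chain: only the last entry can have its ->next pointer redirected to the
 * new bucket without leading concurrent RCU readers of the old chain away
 * from the entries that still remain in it.
 */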
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
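
/*
 * Look for @key in the bucket chain @hash of @tbl.  Returns the existing
 * matching object, NULL once @obj has been linked at the head of the
 * existing rhlist chain for that key (rhlist mode), or an ERR_PTR():
 * -ENOENT when no match exists, or -EAGAIN when more than RHT_ELASTICITY
 * entries were examined without a match, in which case the caller forces
 * a rehash to break up the overlong chain.
 */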
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}
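
/*
 * Try to link @obj into bucket @hash of @tbl.  @data is the result of
 * rhashtable_lookup_one() above.  Returns NULL once the object has been
 * inserted, the next table in the chain if the insertion must be retried
 * there, or an ERR_PTR(): -EEXIST for a duplicate key, or the failure
 * reported by the lookup or by the growth limits (-E2BIG, -EAGAIN, -ENOMEM).
 */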
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk at the current iterator position. Note that we take
 * the RCU lock in all cases including when we return an error. So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter: Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter: Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
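
/* A typical walk, for illustration only ("ht" is an initialised table and
 * "struct test_obj" is the example object type from the rhashtable_init()
 * documentation below; error handling is trimmed):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 *
 * On -EAGAIN the iterator has been rewound to the start of the table, so
 * objects that were already visited may be returned again.
 */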

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
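
/* For illustration, a minimal life cycle using the fixed length key layout
 * from Configuration Example 1 above (error handling trimmed; the *_fast
 * helpers are the inline wrappers from <linux/rhashtable.h>):
 *
 *	static struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key, err;
 *
 *	err = rhashtable_init(&ht, &params);
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, params);
 *
 *	rhashtable_destroy(&ht);
 *
 * rhashtable_free_and_destroy() may be used instead of rhashtable_destroy()
 * to release the remaining elements while tearing the table down.
 */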

/**
 * rhltable_init - initialize a new hash list table
 * @hlt: hash list table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops any async resize that may be pending. If defined, invokes free_fn
 * for each element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull;
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl) {
		if (!rhnull)
			INIT_RHT_NULLS_HEAD(rhnull);
		return &rhnull;
	}

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);