// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
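
/*
 * Typical setup, shown as an illustrative sketch only (the 128-bit depth
 * and the later shrink are arbitrary, error handling is trimmed): a map
 * with an auto-selected word shift (shift < 0), no round-robin, and
 * per-cpu allocation hints.
 *
 *	struct sbitmap sb;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	...
 *	sbitmap_resize(&sb, 64);
 *	...
 *	sbitmap_free(&sb);
 *
 * sbitmap_resize() does not reallocate, so the new depth must stay within
 * the depth the map was initialized with.
 */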

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
				    unsigned int depth,
				    unsigned int alloc_hint,
				    bool wrap)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, depth,
					alloc_hint, wrap);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int sbitmap_find_bit(struct sbitmap *sb,
			    unsigned int depth,
			    unsigned int index,
			    unsigned int alloc_hint,
			    bool wrap)
{
	unsigned int i;
	int nr = -1;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_word(&sb->map[index],
					      min_t(unsigned int,
						    __map_depth(sb, index),
						    depth),
					      alloc_hint, wrap);

		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
				!sb->round_robin);
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);

	return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
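
/*
 * Illustrative allocation/free cycle (sketch only; use_resource() is a
 * placeholder for whatever the caller indexes with the bit number):
 *
 *	int nr = sbitmap_get(&sb);
 *
 *	if (nr < 0)
 *		return -EBUSY;
 *	use_resource(nr);
 *	...
 *	sbitmap_clear_bit(&sb, nr);
 *
 * sbitmap_get() returns -1 when the map is exhausted; sbitmap_clear_bit()
 * is the inline helper from include/linux/sbitmap.h.
 */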

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
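
/*
 * Example of the dump format above, for illustration only: a 16-bit map
 * with bits 0 and 9 set (and nothing deferred-cleared) packs into the
 * bytes 0x01 0x02 and is printed as
 *
 *	00000000: 0102
 *
 * i.e. an offset header every 16 bytes, a space before every second byte,
 * two hex digits per byte, with bit 0 of the map in the least significant
 * bit of the first byte.
 */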

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users)
{
	unsigned int wake_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			       1, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
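
/*
 * Worked example for sbq_calc_wake_batch(), for illustration only: with
 * depth = 128, shift = 6 (64 bits per word) and min_shallow_depth left at
 * UINT_MAX, shallow_depth = 64 and the effective depth stays 128, so
 *
 *	wake_batch = clamp(128 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH)
 *		   = clamp(128 / 8, 1, 8) = 8
 *
 * If a user later calls sbitmap_queue_min_shallow_depth(sbq, 16), the
 * effective depth drops to 2 * 16 = 32 and wake_batch becomes 32 / 8 = 4.
 */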

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			val = READ_ONCE(map->word);
			while (!atomic_long_try_cmpxchg(ptr, &val,
							get_mask | val))
				;
			get_mask = (get_mask & ~val) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index, woken;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness, by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		if (waitqueue_active(&ws->wait)) {
			woken = wake_up_nr(&ws->wait, nr);
			if (woken == nr)
				break;
			nr -= woken;
		}
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
				tags[nr_tags - 1] - offset);
}
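
/*
 * Illustrative batch get/put cycle (sketch only; the tag count, the tags[]
 * array and consume_tag() are hypothetical, and real callers such as
 * blk-mq add their own tag-offset handling):
 *
 *	unsigned int offset;
 *	unsigned long mask = __sbitmap_queue_get_batch(sbq, 4, &offset);
 *	int tags[4], n = 0;
 *
 *	while (mask) {
 *		int bit = __ffs(mask);
 *
 *		mask &= mask - 1;
 *		tags[n++] = offset + bit;
 *		consume_tag(offset + bit);
 *	}
 *	if (n)
 *		sbitmap_queue_clear_batch(sbq, 0, tags, n);
 *
 * The returned mask may hold fewer than nr_tags bits (or be 0), so the
 * caller has to count what it actually got.
 */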
643 * 644 * Orders READ/WRITE on the associated instance(such as request 645 * of blk_mq) by this bit for avoiding race with re-allocation, 646 * and its pair is the memory barrier implied in __sbitmap_get_word. 647 * 648 * One invariant is that the clear bit has to be zero when the bit 649 * is in use. 650 */ 651 smp_mb__before_atomic(); 652 sbitmap_deferred_clear_bit(&sbq->sb, nr); 653 654 /* 655 * Pairs with the memory barrier in set_current_state() to ensure the 656 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker 657 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the 658 * waiter. See the comment on waitqueue_active(). 659 */ 660 smp_mb__after_atomic(); 661 sbitmap_queue_wake_up(sbq, 1); 662 sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); 663 } 664 EXPORT_SYMBOL_GPL(sbitmap_queue_clear); 665 666 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) 667 { 668 int i, wake_index; 669 670 /* 671 * Pairs with the memory barrier in set_current_state() like in 672 * sbitmap_queue_wake_up(). 673 */ 674 smp_mb(); 675 wake_index = atomic_read(&sbq->wake_index); 676 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 677 struct sbq_wait_state *ws = &sbq->ws[wake_index]; 678 679 if (waitqueue_active(&ws->wait)) 680 wake_up(&ws->wait); 681 682 wake_index = sbq_index_inc(wake_index); 683 } 684 } 685 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all); 686 687 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) 688 { 689 bool first; 690 int i; 691 692 sbitmap_show(&sbq->sb, m); 693 694 seq_puts(m, "alloc_hint={"); 695 first = true; 696 for_each_possible_cpu(i) { 697 if (!first) 698 seq_puts(m, ", "); 699 first = false; 700 seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i)); 701 } 702 seq_puts(m, "}\n"); 703 704 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); 705 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); 706 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); 707 708 seq_puts(m, "ws={\n"); 709 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 710 struct sbq_wait_state *ws = &sbq->ws[i]; 711 seq_printf(m, "\t{.wait=%s},\n", 712 waitqueue_active(&ws->wait) ? 
"active" : "inactive"); 713 } 714 seq_puts(m, "}\n"); 715 716 seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin); 717 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); 718 } 719 EXPORT_SYMBOL_GPL(sbitmap_queue_show); 720 721 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, 722 struct sbq_wait_state *ws, 723 struct sbq_wait *sbq_wait) 724 { 725 if (!sbq_wait->sbq) { 726 sbq_wait->sbq = sbq; 727 atomic_inc(&sbq->ws_active); 728 add_wait_queue(&ws->wait, &sbq_wait->wait); 729 } 730 } 731 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); 732 733 void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait) 734 { 735 list_del_init(&sbq_wait->wait.entry); 736 if (sbq_wait->sbq) { 737 atomic_dec(&sbq_wait->sbq->ws_active); 738 sbq_wait->sbq = NULL; 739 } 740 } 741 EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue); 742 743 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, 744 struct sbq_wait_state *ws, 745 struct sbq_wait *sbq_wait, int state) 746 { 747 if (!sbq_wait->sbq) { 748 atomic_inc(&sbq->ws_active); 749 sbq_wait->sbq = sbq; 750 } 751 prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); 752 } 753 EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait); 754 755 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, 756 struct sbq_wait *sbq_wait) 757 { 758 finish_wait(&ws->wait, &sbq_wait->wait); 759 if (sbq_wait->sbq) { 760 atomic_dec(&sbq->ws_active); 761 sbq_wait->sbq = NULL; 762 } 763 } 764 EXPORT_SYMBOL_GPL(sbitmap_finish_wait); 765