/* lib/sbitmap.c: code-search excerpt of lines matching "int", "map", and "mask" (source line numbers kept; non-matching lines omitted) */

  1  // SPDX-License-Identifier: GPL-2.0-only
  4   * Copyright (C) 2013-2014 Jens Axboe
 12  static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
 14          unsigned depth = sb->depth;
 16          sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
 17          if (!sb->alloc_hint)
 18                  return -ENOMEM;
 20          if (depth && !sb->round_robin) {
 21                  int i;
 24                          *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
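/*
 * A short note on the hint seeding above: when the map is not in round-robin
 * mode, each CPU's alloc_hint starts at a random offset below the depth, so
 * concurrent allocators tend to begin scanning in different words instead of
 * all piling onto word 0.
 */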
 30                                              unsigned int depth)
 34          hint = this_cpu_read(*sb->alloc_hint);
 37                  this_cpu_write(*sb->alloc_hint, hint);
 44                                              unsigned int depth,
 45                                              unsigned int hint,
 46                                              unsigned int nr)
 48          if (nr == -1) {
 49                  /* If the map is full, a hint won't do us much good. */
 50                  this_cpu_write(*sb->alloc_hint, 0);
 51          } else if (nr == hint || unlikely(sb->round_robin)) {
 54                  if (hint >= depth - 1)
 56                  this_cpu_write(*sb->alloc_hint, hint);
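/*
 * Hint update policy, as the fragment above suggests: a failed allocation
 * (nr == -1) resets this CPU's hint to 0, since a full map makes the hint
 * useless; a successful allocation that landed on the hint (or any allocation
 * in round-robin mode) appears to move the hint just past the allocated bit,
 * wrapping back to 0 near the end of the map (the increment itself is on an
 * elided line).
 */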
 63  static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
 64                  unsigned int depth, unsigned int alloc_hint, bool wrap)
 66          unsigned long mask, word_mask;
 68          guard(raw_spinlock_irqsave)(&map->swap_lock);
 70          if (!map->cleared) {
 74                  word_mask = (~0UL) >> (BITS_PER_LONG - depth);
 77                   * ->cleared to word, and we change it to retry in case
 83                          word_mask &= ~((1UL << alloc_hint) - 1);
 85                  return (READ_ONCE(map->word) & word_mask) != word_mask;
 89           * First get a stable cleared mask, setting the old mask to 0.
 91          mask = xchg(&map->cleared, 0);
 96          atomic_long_andnot(mask, (atomic_long_t *)&map->word);
 97          BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
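/*
 * Deferred clearing in short: freed bits accumulate in map->cleared rather
 * than being cleared from map->word immediately.  Under map->swap_lock,
 * xchg(&map->cleared, 0) takes the pending mask atomically and
 * atomic_long_andnot() folds it back into map->word, making those bits
 * allocatable again; the BUILD_BUG_ON() backs the cast of &map->word to
 * atomic_long_t *.
 */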
101  int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
102                        gfp_t flags, int node, bool round_robin,
105          unsigned int bits_per_word;
106          int i;
113                  return -EINVAL;
115          sb->shift = shift;
116          sb->depth = depth;
117          sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
118          sb->round_robin = round_robin;
121                  sb->map = NULL;
127                          return -ENOMEM;
129                  sb->alloc_hint = NULL;
132          sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
133          if (!sb->map) {
134                  free_percpu(sb->alloc_hint);
135                  return -ENOMEM;
138          for (i = 0; i < sb->map_nr; i++)
139                  raw_spin_lock_init(&sb->map[i].swap_lock);
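/*
 * Word layout: the depth is split into map_nr words of at most bits_per_word
 * (1 << shift) bits each via the DIV_ROUND_UP() above.  For example, a depth
 * of 100 with 64-bit words gives map_nr = 2, with the second word covering
 * only the remaining 36 bits.
 */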
145  void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
147          unsigned int bits_per_word = 1U << sb->shift;
148          unsigned int i;
150          for (i = 0; i < sb->map_nr; i++)
151                  sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
153          sb->depth = depth;
154          sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
158  static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
159                                unsigned int hint, bool wrap)
161          int nr;
172                           * exhaust the map.
178                          return -1;
185                  if (hint >= depth - 1)
192  static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
193                                      unsigned int depth,
194                                      unsigned int alloc_hint,
197          int nr;
200                  nr = __sbitmap_get_word(&map->word, depth,
202                  if (nr != -1)
204                  if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
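/*
 * sbitmap_find_bit_in_word() pairs the word scan with deferred clearing:
 * when __sbitmap_get_word() comes back empty, sbitmap_deferred_clear() gets
 * a chance to fold ->cleared back into ->word, and the scan is retried only
 * if that actually freed something.
 */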
211  static unsigned int __map_depth_with_shallow(const struct sbitmap *sb,
212                                               int index,
213                                               unsigned int shallow_depth)
216          unsigned int word_depth, reminder;
219          if (shallow_depth >= sb->depth)
223          reminder = do_div(shallow_word_depth, sb->depth);
228          return (unsigned int)shallow_word_depth;
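/*
 * A sketch of the shallow scaling, assuming the elided lines multiply
 * word_depth by shallow_depth before the do_div() above: each word's usable
 * depth works out to roughly word_depth * shallow_depth / sb->depth, rounded
 * up when there is a remainder.  E.g. sb->depth = 128 with 64-bit words and
 * shallow_depth = 32 would allow about 16 bits per word.
 */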
231  static int sbitmap_find_bit(struct sbitmap *sb,
232                              unsigned int shallow_depth,
233                              unsigned int index,
234                              unsigned int alloc_hint,
237          unsigned int i;
238          int nr = -1;
240          for (i = 0; i < sb->map_nr; i++) {
241                  unsigned int depth = __map_depth_with_shallow(sb, index,
245                          nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
247                  if (nr != -1) {
248                          nr += index << sb->shift;
254                  if (++index >= sb->map_nr)
261  static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
263          unsigned int index;
272          if (sb->round_robin)
278                                          !sb->round_robin);
281  int sbitmap_get(struct sbitmap *sb)
283          int nr;
284          unsigned int hint, depth;
286          if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
287                  return -1;
289          depth = READ_ONCE(sb->depth);
298  static int __sbitmap_get_shallow(struct sbitmap *sb,
299                                   unsigned int alloc_hint,
302          unsigned int index;
311   * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
317   * different allocation limits. E.g., there can be a high-priority class that
318   * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
319   * with a @shallow_depth of (sb->depth >> 1). Then, the low-priority
321   * from starving out the high-priority class.
323   * Return: Non-negative allocated bit number if successful, -1 otherwise.
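 *
 * A minimal usage sketch for the low-priority class described above
 * (hypothetical caller, shown for illustration only):
 *
 *        nr = sbitmap_get_shallow(sb, sb->depth >> 1);
 *        if (nr < 0)
 *                return -EAGAIN;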
325  static int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
327          int nr;
328          unsigned int hint, depth;
330          if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
331                  return -1;
333          depth = READ_ONCE(sb->depth);
343          unsigned int i;
345          for (i = 0; i < sb->map_nr; i++) {
346                  if (sb->map[i].word & ~sb->map[i].cleared)
353  static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
355          unsigned int i, weight = 0;
357          for (i = 0; i < sb->map_nr; i++) {
358                  const struct sbitmap_word *word = &sb->map[i];
359                  unsigned int word_depth = __map_depth(sb, i);
362                          weight += bitmap_weight(&word->word, word_depth);
364                          weight += bitmap_weight(&word->cleared, word_depth);
369  static unsigned int sbitmap_cleared(const struct sbitmap *sb)
374  unsigned int sbitmap_weight(const struct sbitmap *sb)
376          return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
382          seq_printf(m, "depth=%u\n", sb->depth);
385          seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
386          seq_printf(m, "map_nr=%u\n", sb->map_nr);
390  static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
405          unsigned int byte_bits = 0;
406          unsigned int offset = 0;
407          int i;
409          for (i = 0; i < sb->map_nr; i++) {
410                  unsigned long word = READ_ONCE(sb->map[i].word);
411                  unsigned long cleared = READ_ONCE(sb->map[i].cleared);
412                  unsigned int word_bits = __map_depth(sb, i);
417                          unsigned int bits = min(8 - byte_bits, word_bits);
419                          byte |= (word & (BIT(bits) - 1)) << byte_bits;
428                          word_bits -= bits;
440  static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
441                                          unsigned int depth)
443          return clamp_t(unsigned int,
444                         min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES,
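/*
 * The wake batch therefore scales with the usable depth: the smaller of the
 * queue depth and min_shallow_depth is spread across the SBQ_WAIT_QUEUES
 * wait queues, and the result is clamped to a small fixed range (the clamp
 * bounds sit on the elided lines).
 */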
448  int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
449                              int shift, bool round_robin, gfp_t flags, int node)
451          int ret;
452          int i;
454          ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
459          sbq->min_shallow_depth = UINT_MAX;
460          sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
461          atomic_set(&sbq->wake_index, 0);
462          atomic_set(&sbq->ws_active, 0);
463          atomic_set(&sbq->completion_cnt, 0);
464          atomic_set(&sbq->wakeup_cnt, 0);
466          sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
467          if (!sbq->ws) {
468                  sbitmap_free(&sbq->sb);
469                  return -ENOMEM;
473                  init_waitqueue_head(&sbq->ws[i].wait);
480                                              unsigned int depth)
482          unsigned int wake_batch;
485          if (sbq->wake_batch != wake_batch)
486                  WRITE_ONCE(sbq->wake_batch, wake_batch);
490                                               unsigned int users)
492          unsigned int wake_batch;
493          unsigned int depth = (sbq->sb.depth + users - 1) / users;
498          WRITE_ONCE(sbq->wake_batch, wake_batch);
502  void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
505          sbitmap_resize(&sbq->sb, depth);
509  int __sbitmap_queue_get(struct sbitmap_queue *sbq)
511          return sbitmap_get(&sbq->sb);
515  unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
516                                          unsigned int *offset)
518          struct sbitmap *sb = &sbq->sb;
519          unsigned int hint, depth;
521          int i;
523          if (unlikely(sb->round_robin))
526          depth = READ_ONCE(sb->depth);
531          for (i = 0; i < sb->map_nr; i++) {
532                  struct sbitmap_word *map = &sb->map[index];
534                  unsigned int map_depth = __map_depth(sb, index);
537                  sbitmap_deferred_clear(map, 0, 0, 0);
538                  val = READ_ONCE(map->word);
539                  if (val == (1UL << (map_depth - 1)) - 1)
544                          atomic_long_t *ptr = (atomic_long_t *) &map->word;
546                          get_mask = ((1UL << nr_tags) - 1) << nr;
552                                  *offset = nr + (index << sb->shift);
554                                                              *offset + nr_tags - 1);
560                  if (++index >= sb->map_nr)
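/*
 * Batch allocation sketch: get_mask packs nr_tags contiguous free bits
 * starting at bit nr; once the (elided) atomic cmpxchg has installed them in
 * map->word, *offset reports the absolute number of the first tag,
 * nr + (index << sb->shift), and the per-CPU hint is advanced past the last
 * tag of the batch.
 */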
567  int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
568                                unsigned int shallow_depth)
570          WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
572          return sbitmap_get_shallow(&sbq->sb, shallow_depth);
577                                           unsigned int min_shallow_depth)
579          sbq->min_shallow_depth = min_shallow_depth;
580          sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
584  static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
586          int i, wake_index, woken;
588          if (!atomic_read(&sbq->ws_active))
591          wake_index = atomic_read(&sbq->wake_index);
593                  struct sbq_wait_state *ws = &sbq->ws[wake_index];
603                  if (waitqueue_active(&ws->wait)) {
604                          woken = wake_up_nr(&ws->wait, nr);
607                          nr -= woken;
611          if (wake_index != atomic_read(&sbq->wake_index))
612                  atomic_set(&sbq->wake_index, wake_index);
615  void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
617          unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
618          unsigned int wakeups;
620          if (!atomic_read(&sbq->ws_active))
623          atomic_add(nr, &sbq->completion_cnt);
624          wakeups = atomic_read(&sbq->wakeup_cnt);
627                  if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
629          } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
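/*
 * Wakeup batching: completions are only counted into completion_cnt here;
 * waiters are actually woken once completion_cnt runs at least wake_batch
 * ahead of wakeup_cnt, and the atomic_try_cmpxchg() advances wakeup_cnt so
 * that a single caller issues the wakeups for a given batch.
 */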
636  static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
638          if (likely(!sb->round_robin && tag < sb->depth))
639                  data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
642  void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
643                                 int *tags, int nr_tags)
645          struct sbitmap *sb = &sbq->sb;
647          unsigned long mask = 0;
648          int i;
652                  const int tag = tags[i] - offset;
655                  /* since we're clearing a batch, skip the deferred map */
656                  this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
660                          atomic_long_andnot(mask, (atomic_long_t *) addr);
661                          mask = 0;
664                  mask |= (1UL << SB_NR_TO_BIT(sb, tag));
667          if (mask)
668                  atomic_long_andnot(mask, (atomic_long_t *) addr);
672          sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
673                                  tags[nr_tags - 1] - offset);
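/*
 * Batched clearing: bits are OR-ed into a local mask for as long as
 * consecutive tags land in the same word; when the word changes, the
 * accumulated mask is applied with one atomic_long_andnot() and restarted,
 * and a final andnot flushes whatever is left.  The deferred ->cleared map is
 * skipped on purpose (see the comment above), and the per-CPU hint ends up
 * pointing at the last tag of the batch.
 */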
676  void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
677                           unsigned int cpu)
683           * of blk_mq) by this bit for avoiding race with re-allocation,
690          sbitmap_deferred_clear_bit(&sbq->sb, nr);
700          sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
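/*
 * Single-bit clearing takes the deferred path instead: the bit is set in
 * ->cleared via sbitmap_deferred_clear_bit(), which the comment fragment
 * above ties to avoiding races with re-allocation of the same bit, and the
 * freeing CPU's hint is then pointed at the released tag.
 */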
706          int i, wake_index;
713          wake_index = atomic_read(&sbq->wake_index);
715                  struct sbq_wait_state *ws = &sbq->ws[wake_index];
717                  if (waitqueue_active(&ws->wait))
718                          wake_up(&ws->wait);
728          int i;
730          sbitmap_show(&sbq->sb, m);
738                  seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
742          seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
743          seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
744          seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
748                  struct sbq_wait_state *ws = &sbq->ws[i];
750                              waitqueue_active(&ws->wait) ? "active" : "inactive");
754          seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
755          seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
763          if (!sbq_wait->sbq) {
764                  sbq_wait->sbq = sbq;
765                  atomic_inc(&sbq->ws_active);
766                  add_wait_queue(&ws->wait, &sbq_wait->wait);
773          list_del_init(&sbq_wait->wait.entry);
774          if (sbq_wait->sbq) {
775                  atomic_dec(&sbq_wait->sbq->ws_active);
776                  sbq_wait->sbq = NULL;
783                              struct sbq_wait *sbq_wait, int state)
785          if (!sbq_wait->sbq) {
786                  atomic_inc(&sbq->ws_active);
787                  sbq_wait->sbq = sbq;
789          prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
796          finish_wait(&ws->wait, &sbq_wait->wait);
797          if (sbq_wait->sbq) {
798                  atomic_dec(&sbq->ws_active);
799                  sbq_wait->sbq = NULL;