Lines Matching refs:sbq (lib/sbitmap.c, Linux kernel)

406 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,  in sbq_calc_wake_batch()  argument
423 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); in sbq_calc_wake_batch()
424 depth = ((depth >> sbq->sb.shift) * shallow_depth + in sbq_calc_wake_batch()
425 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); in sbq_calc_wake_batch()
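
Lines 423-425 size the usable depth under a possible shallow-depth limit: the depth splits into full words of (1 << sb.shift) bits plus one partial word, and each word contributes at most shallow_depth usable bits. A standalone sketch of that arithmetic; the values (depth = 100, shift = 6, min_shallow_depth = 16) are illustrative, not taken from the listing:

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int depth = 100, shift = 6, min_shallow_depth = 16;
            unsigned int shallow_depth = min_u(1U << shift, min_shallow_depth);

            /* one full 64-bit word contributes 16 of its bits; the partial
             * word holds 100 & 63 = 36 bits, also capped at 16 */
            depth = (depth >> shift) * shallow_depth +
                    min_u(depth & ((1U << shift) - 1), shallow_depth);
            printf("effective depth = %u\n", depth);        /* prints 32 */
            return 0;
    }
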
432 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, in sbitmap_queue_init_node() argument
438 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node, in sbitmap_queue_init_node()
443 sbq->min_shallow_depth = UINT_MAX; in sbitmap_queue_init_node()
444 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_init_node()
445 atomic_set(&sbq->wake_index, 0); in sbitmap_queue_init_node()
446 atomic_set(&sbq->ws_active, 0); in sbitmap_queue_init_node()
447 atomic_set(&sbq->completion_cnt, 0); in sbitmap_queue_init_node()
448 atomic_set(&sbq->wakeup_cnt, 0); in sbitmap_queue_init_node()
450 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); in sbitmap_queue_init_node()
451 if (!sbq->ws) { in sbitmap_queue_init_node()
452 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
457 init_waitqueue_head(&sbq->ws[i].wait); in sbitmap_queue_init_node()
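
Line 438 continues onto unmatched lines (the round_robin and alloc_hint arguments), and lines 450-457 allocate and initialize the SBQ_WAIT_QUEUES wait queues, unwinding the sbitmap on allocation failure (line 452). A hedged usage sketch; my_tags_init() and the GFP/node choices are assumptions, not from the listing:

    #include <linux/sbitmap.h>

    /* Hypothetical driver init; a negative shift lets the library derive
     * the word shift from the depth. Returns 0 or a negative errno,
     * matching sbitmap_queue_init_node() above. */
    static int my_tags_init(struct sbitmap_queue *sbq, unsigned int depth,
                            int node)
    {
            return sbitmap_queue_init_node(sbq, depth, -1 /* shift */,
                                           false /* round_robin */,
                                           GFP_KERNEL, node);
    }
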
463 static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, in sbitmap_queue_update_wake_batch() argument
468 wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_update_wake_batch()
469 if (sbq->wake_batch != wake_batch) in sbitmap_queue_update_wake_batch()
470 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_update_wake_batch()
473 void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, in sbitmap_queue_recalculate_wake_batch() argument
477 unsigned int depth = (sbq->sb.depth + users - 1) / users; in sbitmap_queue_recalculate_wake_batch()
482 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_recalculate_wake_batch()
486 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) in sbitmap_queue_resize() argument
488 sbitmap_queue_update_wake_batch(sbq, depth); in sbitmap_queue_resize()
489 sbitmap_resize(&sbq->sb, depth); in sbitmap_queue_resize()
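
Line 477 is a ceiling division: the bitmap depth is split evenly across the `users` active users, rounding up, and the wake batch is recomputed from one user's share (stored at line 482) rather than from the whole bitmap. sbitmap_queue_resize() likewise refreshes the batch (line 488) before resizing the bitmap itself (line 489). A worked example of the share calculation, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            /* per-user share as computed at line 477; sb.depth = 128 and
             * users = 3 are illustrative values */
            unsigned int depth = 128, users = 3;

            printf("%u\n", (depth + users - 1) / users);    /* prints 43 */
            return 0;
    }
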
493 int __sbitmap_queue_get(struct sbitmap_queue *sbq) in __sbitmap_queue_get() argument
495 return sbitmap_get(&sbq->sb); in __sbitmap_queue_get()
499 unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, in __sbitmap_queue_get_batch() argument
502 struct sbitmap *sb = &sbq->sb; in __sbitmap_queue_get_batch()
551 int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, in sbitmap_queue_get_shallow() argument
554 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); in sbitmap_queue_get_shallow()
556 return sbitmap_get_shallow(&sbq->sb, shallow_depth); in sbitmap_queue_get_shallow()
560 void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, in sbitmap_queue_min_shallow_depth() argument
563 sbq->min_shallow_depth = min_shallow_depth; in sbitmap_queue_min_shallow_depth()
564 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); in sbitmap_queue_min_shallow_depth()
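
sbitmap_queue_get_shallow() (line 551) polices its depth limit only via the WARN_ON_ONCE at line 554, so callers are expected to declare their lowest shallow depth up front with sbitmap_queue_min_shallow_depth() (line 560), which also forces a wake-batch recalculation at line 564. A hedged ordering sketch; the depth value 4 and the helper names are assumptions:

    #include <linux/sbitmap.h>

    static void my_shallow_setup(struct sbitmap_queue *sbq)
    {
            /* promise: no later get will use a shallow depth below 4 */
            sbitmap_queue_min_shallow_depth(sbq, 4);
    }

    static int my_shallow_get(struct sbitmap_queue *sbq)
    {
            /* a depth below the promised minimum would trip the
             * WARN_ON_ONCE at line 554 */
            return sbitmap_queue_get_shallow(sbq, 4);
    }
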
568 static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr) in __sbitmap_queue_wake_up() argument
572 if (!atomic_read(&sbq->ws_active)) in __sbitmap_queue_wake_up()
575 wake_index = atomic_read(&sbq->wake_index); in __sbitmap_queue_wake_up()
577 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in __sbitmap_queue_wake_up()
595 if (wake_index != atomic_read(&sbq->wake_index)) in __sbitmap_queue_wake_up()
596 atomic_set(&sbq->wake_index, wake_index); in __sbitmap_queue_wake_up()
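
__sbitmap_queue_wake_up() walks the SBQ_WAIT_QUEUES (eight) wait queues in a ring starting from wake_index (lines 575-577), and writes the advanced position back (line 596) only if it actually moved, presumably to avoid dirtying a shared cacheline for nothing. A userspace model of the wrap-around step; my next_ws() is illustrative, not the kernel's helper:

    #define SBQ_WAIT_QUEUES 8       /* matches include/linux/sbitmap.h */

    /* Wrap-around successor of a wait-queue index. */
    static int next_ws(int wake_index)
    {
            return (wake_index + 1) % SBQ_WAIT_QUEUES;
    }
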
599 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr) in sbitmap_queue_wake_up() argument
601 unsigned int wake_batch = READ_ONCE(sbq->wake_batch); in sbitmap_queue_wake_up()
604 if (!atomic_read(&sbq->ws_active)) in sbitmap_queue_wake_up()
607 atomic_add(nr, &sbq->completion_cnt); in sbitmap_queue_wake_up()
608 wakeups = atomic_read(&sbq->wakeup_cnt); in sbitmap_queue_wake_up()
611 if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch) in sbitmap_queue_wake_up()
613 } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt, in sbitmap_queue_wake_up()
616 __sbitmap_queue_wake_up(sbq, wake_batch); in sbitmap_queue_wake_up()
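
sbitmap_queue_wake_up() batches wakeups: completions accumulate in completion_cnt (line 607), and only the caller that advances wakeup_cnt by a full wake_batch through the cmpxchg loop (lines 611-613) goes on to issue wakeups at line 616. A userspace model of that test, using C11 atomics in place of the kernel's atomic_t (illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Returns true for the one caller that completes a batch and must
     * wake wake_batch waiters; every other caller returns false. */
    static bool batch_complete(atomic_int *completion_cnt,
                               atomic_int *wakeup_cnt,
                               int nr, int wake_batch)
    {
            int wakeups;

            atomic_fetch_add(completion_cnt, nr);
            wakeups = atomic_load(wakeup_cnt);
            do {
                    if (atomic_load(completion_cnt) - wakeups < wake_batch)
                            return false;
                    /* on failure, wakeups is reloaded, like
                     * atomic_try_cmpxchg() at line 613 */
            } while (!atomic_compare_exchange_weak(wakeup_cnt, &wakeups,
                                                   wakeups + wake_batch));
            return true;
    }
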
626 void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, in sbitmap_queue_clear_batch() argument
629 struct sbitmap *sb = &sbq->sb; in sbitmap_queue_clear_batch()
655 sbitmap_queue_wake_up(sbq, nr_tags); in sbitmap_queue_clear_batch()
656 sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(), in sbitmap_queue_clear_batch()
660 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, in sbitmap_queue_clear() argument
674 sbitmap_deferred_clear_bit(&sbq->sb, nr); in sbitmap_queue_clear()
683 sbitmap_queue_wake_up(sbq, 1); in sbitmap_queue_clear()
684 sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); in sbitmap_queue_clear()
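
Both clear paths credit completions toward the wake batch before refreshing the per-CPU allocation hint (lines 655-656 and 683-684), and sbitmap_queue_clear() defers the actual bit clear via sbitmap_deferred_clear_bit() (line 674) rather than touching the live word. A hedged allocate/free pair built from these entry points; my_tag_get()/my_tag_put() are hypothetical names:

    #include <linux/sbitmap.h>
    #include <linux/smp.h>

    /* __sbitmap_queue_get() returns -1 when no bit is free (line 495);
     * sbitmap_queue_clear() both frees the bit and counts one completion. */
    static int my_tag_get(struct sbitmap_queue *sbq)
    {
            return __sbitmap_queue_get(sbq);
    }

    static void my_tag_put(struct sbitmap_queue *sbq, unsigned int tag)
    {
            sbitmap_queue_clear(sbq, tag, raw_smp_processor_id());
    }
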
688 void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) in sbitmap_queue_wake_all() argument
697 wake_index = atomic_read(&sbq->wake_index); in sbitmap_queue_wake_all()
699 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbitmap_queue_wake_all()
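
sbitmap_queue_wake_all() scans every wait queue starting from wake_index (lines 697-699) and wakes all sleepers, ignoring the batch counters. A minimal sketch of a caller, loosely modeled on blk-mq's tag wakeup-all path; the helper name is hypothetical:

    #include <linux/sbitmap.h>

    /* Hypothetical recovery path: release every sleeper unconditionally,
     * e.g. after a queue restart. */
    static void my_tags_kick(struct sbitmap_queue *sbq)
    {
            sbitmap_queue_wake_all(sbq);
    }
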
709 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) in sbitmap_queue_show() argument
714 sbitmap_show(&sbq->sb, m); in sbitmap_queue_show()
722 seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i)); in sbitmap_queue_show()
726 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); in sbitmap_queue_show()
727 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); in sbitmap_queue_show()
728 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); in sbitmap_queue_show()
732 struct sbq_wait_state *ws = &sbq->ws[i]; in sbitmap_queue_show()
738 seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin); in sbitmap_queue_show()
739 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); in sbitmap_queue_show()
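
Given the seq_printf() format strings above, the tail of a sbitmap_queue_show() dump has roughly the following shape (values hypothetical; the bitmap, alloc_hint, and per-wait-queue lines are elided). Note that 4294967295 is UINT_MAX, the min_shallow_depth default set at line 443:

    wake_batch=8
    wake_index=0
    ws_active=0
    round_robin=0
    min_shallow_depth=4294967295
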
743 void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, in sbitmap_add_wait_queue() argument
747 if (!sbq_wait->sbq) { in sbitmap_add_wait_queue()
748 sbq_wait->sbq = sbq; in sbitmap_add_wait_queue()
749 atomic_inc(&sbq->ws_active); in sbitmap_add_wait_queue()
758 if (sbq_wait->sbq) { in sbitmap_del_wait_queue()
759 atomic_dec(&sbq_wait->sbq->ws_active); in sbitmap_del_wait_queue()
760 sbq_wait->sbq = NULL; in sbitmap_del_wait_queue()
765 void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, in sbitmap_prepare_to_wait() argument
769 if (!sbq_wait->sbq) { in sbitmap_prepare_to_wait()
770 atomic_inc(&sbq->ws_active); in sbitmap_prepare_to_wait()
771 sbq_wait->sbq = sbq; in sbitmap_prepare_to_wait()
777 void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, in sbitmap_finish_wait() argument
781 if (sbq_wait->sbq) { in sbitmap_finish_wait()
782 atomic_dec(&sbq->ws_active); in sbitmap_finish_wait()
783 sbq_wait->sbq = NULL; in sbitmap_finish_wait()
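
The four helpers above keep ws_active balanced: the add/prepare pair bumps it once per sbq_wait (lines 749 and 770), guarded by the sbq back-pointer so repeated calls don't double-count, and the del/finish pair drops it and clears the pointer (lines 759-760 and 782-783). A blocking-allocation sketch modeled on the blk-mq tag code; the wait-queue pick and the io_schedule() policy are assumptions:

    #include <linux/sbitmap.h>
    #include <linux/sched.h>

    static int my_tag_get_blocking(struct sbitmap_queue *sbq)
    {
            struct sbq_wait_state *ws = &sbq->ws[0];    /* illustrative pick */
            DEFINE_SBQ_WAIT(wait);
            int nr;

            do {
                    sbitmap_prepare_to_wait(sbq, ws, &wait,
                                            TASK_UNINTERRUPTIBLE);
                    nr = __sbitmap_queue_get(sbq);
                    if (nr >= 0)
                            break;
                    io_schedule();
                    sbitmap_finish_wait(sbq, ws, &wait);
            } while (1);
            /* always finish, so ws_active is decremented exactly once */
            sbitmap_finish_wait(sbq, ws, &wait);

            return nr;
    }
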