Lines matching refs:khd (per-hctx data of the kyber I/O scheduler, struct kyber_hctx_data, block/kyber-iosched.c)
138 * The mapping between kcq and khd mirrors the mapping between ctx and hctx;
139 * we use request->mq_ctx->index_hw to index the kcq within khd.
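Lines 573 and 599 below show how this mapping is used in practice: the software queue's index_hw slot selects the kcq inside the hardware queue's khd. A minimal sketch of that lookup, pieced together from the listed fragments (the helper name rq_to_kcq is made up for illustration, not taken from the source):

/*
 * Sketch only: how a request's software queue (ctx) selects the
 * corresponding kyber_ctx_queue inside the hctx's kyber_hctx_data.
 */
static struct kyber_ctx_queue *rq_to_kcq(struct blk_mq_hw_ctx *hctx,
					 struct request *rq)
{
	struct kyber_hctx_data *khd = hctx->sched_data;

	/* index_hw[] maps this ctx to its slot within the hctx (see line 599) */
	return &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
}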
466 struct kyber_hctx_data *khd;
469 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
470 if (!khd)
473 khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
476 if (!khd->kcqs)
480 kyber_ctx_queue_init(&khd->kcqs[i]);
483 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
487 sbitmap_free(&khd->kcq_map[i]);
492 spin_lock_init(&khd->lock);
495 INIT_LIST_HEAD(&khd->rqs[i]);
496 khd->domain_wait[i].sbq = NULL;
497 init_waitqueue_func_entry(&khd->domain_wait[i].wait,
499 khd->domain_wait[i].wait.private = hctx;
500 INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
501 atomic_set(&khd->wait_index[i], 0);
504 khd->cur_domain = 0;
505 khd->batching = 0;
507 hctx->sched_data = khd;
513 kfree(khd->kcqs);
515 kfree(khd);
521 struct kyber_hctx_data *khd = hctx->sched_data;
525 sbitmap_free(&khd->kcq_map[i]);
526 kfree(khd->kcqs);
572 struct kyber_hctx_data *khd = hctx->sched_data;
573 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
594 struct kyber_hctx_data *khd = hctx->sched_data;
599 struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
608 sbitmap_set_bit(&khd->kcq_map[sched_domain],
662 struct kyber_hctx_data *khd;
670 struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];
681 static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
686 .khd = khd,
691 sbitmap_for_each_set(&khd->kcq_map[sched_domain],
707 struct kyber_hctx_data *khd,
710 unsigned int sched_domain = khd->cur_domain;
712 struct sbq_wait *wait = &khd->domain_wait[sched_domain];
721 * khd->lock, but we still need to be careful about the waker.
725 &khd->wait_index[sched_domain]);
726 khd->domain_ws[sched_domain] = ws;
744 ws = khd->domain_ws[sched_domain];
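Lines 710-744 are the domain-token acquisition path: khd caches the chosen sbq_wait_state in domain_ws[] so that, per the comment at line 721, the code can safely take itself off the wait queue after a late token grab, even though khd->lock already serializes callers. The sketch below reconstructs that flow; kqd->domain_tokens and the sbitmap_queue wait helpers are assumptions based on the generic sbitmap API, not on the lines listed here, and the real function may differ in detail.

/* Sketch of the token-wait bookkeeping kept in khd (lines 710-744). */
static int get_domain_token_sketch(struct kyber_queue_data *kqd,
				   struct kyber_hctx_data *khd,
				   struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *tokens = &kqd->domain_tokens[sched_domain]; /* assumed field */
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(tokens);
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		/*
		 * No token: pick a wait state round-robin via wait_index,
		 * remember it in domain_ws so it can be found again below,
		 * and queue this hctx's wait entry on it.
		 */
		ws = sbq_wait_ptr(tokens, &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(tokens, ws, wait);

		/* retry in case a token was freed before we queued */
		nr = __sbitmap_queue_get(tokens);
	}
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		/* got a token after queueing: drop off the wait queue again */
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}
	return nr;
}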
755 struct kyber_hctx_data *khd,
762 rqs = &khd->rqs[khd->cur_domain];
769 * khd->lock serializes the flushes, so if we observed any bit set in
774 nr = kyber_get_domain_token(kqd, khd, hctx);
776 khd->batching++;
782 kyber_domain_names[khd->cur_domain]);
784 } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
785 nr = kyber_get_domain_token(kqd, khd, hctx);
787 kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
789 khd->batching++;
795 kyber_domain_names[khd->cur_domain]);
806 struct kyber_hctx_data *khd = hctx->sched_data;
810 spin_lock(&khd->lock);
816 if (khd->batching < kyber_batch_size[khd->cur_domain]) {
817 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
831 khd->batching = 0;
833 if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
834 khd->cur_domain = 0;
836 khd->cur_domain++;
838 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
845 spin_unlock(&khd->lock);
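Lines 806-845 outline the dispatch loop: with khd->lock held, up to kyber_batch_size[cur_domain] requests are issued from the current domain before cur_domain rotates round-robin through all KYBER_NUM_DOMAINS; per the comment at line 769, the per-domain helper can trust khd->rqs first because khd->lock serializes the kcq_map flushes. A condensed sketch follows (how kqd is obtained from the hctx is an assumption, and the real function's bookkeeping is simplified):

/* Condensed sketch of the round-robin dispatch loop (lines 806-845). */
static struct request *dispatch_sketch(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; /* assumed */
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/* Finish the current batch before rotating to another domain. */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/* Batch exhausted (or domain empty): try each remaining domain in turn. */
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		khd->batching = 0;
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}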
851 struct kyber_hctx_data *khd = hctx->sched_data;
855 if (!list_empty_careful(&khd->rqs[i]) ||
856 sbitmap_any_bit_set(&khd->kcq_map[i]))
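Lines 851-856 show the two conditions behind the "has work" check: a domain either has already-flushed requests sitting on khd->rqs[] or still has a pending bit in its kcq_map. Reconstructed as a standalone helper (the name has_work_sketch is illustrative):

/* Sketch of the "any work pending?" check (lines 851-856). */
static bool has_work_sketch(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		/* work pending if this domain has flushed requests queued... */
		if (!list_empty_careful(&khd->rqs[i]) ||
		    /* ...or any kcq for this domain still has a bit set */
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}
	return false;
}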
911 __acquires(&khd->lock) \
914 struct kyber_hctx_data *khd = hctx->sched_data; \
916 spin_lock(&khd->lock); \
917 return seq_list_start(&khd->rqs[domain], *pos); \
924 struct kyber_hctx_data *khd = hctx->sched_data; \
926 return seq_list_next(v, &khd->rqs[domain], pos); \
930 __releases(&khd->lock) \
933 struct kyber_hctx_data *khd = hctx->sched_data; \
935 spin_unlock(&khd->lock); \
948 struct kyber_hctx_data *khd = hctx->sched_data; \
949 wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
972 struct kyber_hctx_data *khd = hctx->sched_data;
974 seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
981 struct kyber_hctx_data *khd = hctx->sched_data;
983 seq_printf(m, "%u\n", khd->batching);