Lines matching refs: hctx_idx
673 blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx) in blk_mq_alloc_request_hctx() argument
701 if (hctx_idx >= q->nr_hw_queues) in blk_mq_alloc_request_hctx()
713 data.hctx = xa_load(&q->hctx_table, hctx_idx); in blk_mq_alloc_request_hctx()
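The blk_mq_alloc_request_hctx() hits above show the caller-supplied index being range-checked against q->nr_hw_queues before the hardware context is looked up in q->hctx_table (an xarray keyed by hctx_idx, hence the xa_load()). Below is a minimal userspace sketch of that validate-then-look-up pattern; it is not kernel code, and lookup_hctx, hctx_stub and the plain array standing in for the xarray are illustrative names only:

#include <stdio.h>
#include <stddef.h>

#define NR_HW_QUEUES 4

struct hctx_stub { unsigned int queue_num; };

/* Plain array standing in for q->hctx_table; NULL means "no entry here",
 * just as xa_load() returns NULL for an index with nothing stored. */
static struct hctx_stub hctxs[NR_HW_QUEUES];
static struct hctx_stub *hctx_table[NR_HW_QUEUES];

/* Mirrors the checks visible in blk_mq_alloc_request_hctx(): reject an
 * out-of-range index first, then hand back whatever the table holds. */
static struct hctx_stub *lookup_hctx(unsigned int hctx_idx)
{
	if (hctx_idx >= NR_HW_QUEUES)
		return NULL;
	return hctx_table[hctx_idx];
}

int main(void)
{
	for (unsigned int i = 0; i < NR_HW_QUEUES; i++) {
		hctxs[i].queue_num = i;
		hctx_table[i] = &hctxs[i];
	}

	printf("idx 2 -> %p\n", (void *)lookup_hctx(2));
	printf("idx 9 -> %p (out of range)\n", (void *)lookup_hctx(9));
	return 0;
}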
3398 unsigned int hctx_idx) in blk_mq_free_rqs() argument
3409 drv_tags = set->tags[hctx_idx]; in blk_mq_free_rqs()
3419 set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
3449 unsigned int hctx_idx) in hctx_idx_to_type() argument
3457 if (hctx_idx >= start && hctx_idx < end) in hctx_idx_to_type()
3468 unsigned int hctx_idx) in blk_mq_get_hctx_node() argument
3470 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); in blk_mq_get_hctx_node()
3472 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); in blk_mq_get_hctx_node()
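Taken together, the hctx_idx_to_type() and blk_mq_get_hctx_node() hits show how a hardware-queue index is first resolved to an hctx type by finding which map's [queue_offset, queue_offset + nr_queues) window contains it, and then to a NUMA node for that map. A self-contained sketch of the same two-step lookup, assuming a simplified qmap_stub in place of struct blk_mq_queue_map and a fixed per-map node instead of the real blk_mq_hw_queue_to_node() computation (pick_type and pick_node are illustrative names):

#include <stdio.h>

/* Illustrative stand-in for struct blk_mq_queue_map: each hctx type owns a
 * contiguous range [queue_offset, queue_offset + nr_queues) of hctx indexes;
 * the node is faked with one value per map for the example. */
struct qmap_stub {
	unsigned int queue_offset;
	unsigned int nr_queues;
	int node;
};

enum { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, NR_TYPES };

/* Mirrors the range test seen in hctx_idx_to_type(): pick the map whose
 * window contains hctx_idx, falling back to the default type. */
static int pick_type(const struct qmap_stub map[NR_TYPES], unsigned int hctx_idx)
{
	for (int i = 0; i < NR_TYPES; i++) {
		unsigned int start = map[i].queue_offset;
		unsigned int end = start + map[i].nr_queues;

		if (hctx_idx >= start && hctx_idx < end)
			return i;
	}
	return TYPE_DEFAULT;
}

/* Mirrors the shape of blk_mq_get_hctx_node(): type first, then the node. */
static int pick_node(const struct qmap_stub map[NR_TYPES], unsigned int hctx_idx)
{
	return map[pick_type(map, hctx_idx)].node;
}

int main(void)
{
	struct qmap_stub map[NR_TYPES] = {
		[TYPE_DEFAULT] = { .queue_offset = 0, .nr_queues = 4, .node = 0 },
		[TYPE_READ]    = { .queue_offset = 4, .nr_queues = 2, .node = 1 },
		[TYPE_POLL]    = { .queue_offset = 6, .nr_queues = 2, .node = 1 },
	};

	for (unsigned int idx = 0; idx < 8; idx++)
		printf("hctx %u -> type %d, node %d\n", idx,
		       pick_type(map, idx), pick_node(map, idx));
	return 0;
}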
3476 unsigned int hctx_idx, in blk_mq_alloc_rq_map() argument
3480 int node = blk_mq_get_hctx_node(set, hctx_idx); in blk_mq_alloc_rq_map()
3512 unsigned int hctx_idx, int node) in blk_mq_init_request() argument
3517 ret = set->ops->init_request(set, rq, hctx_idx, node); in blk_mq_init_request()
3528 unsigned int hctx_idx, unsigned int depth) in blk_mq_alloc_rqs() argument
3531 int node = blk_mq_get_hctx_node(set, hctx_idx); in blk_mq_alloc_rqs()
3587 if (blk_mq_init_request(set, rq, hctx_idx, node)) { in blk_mq_alloc_rqs()
3599 blk_mq_free_rqs(set, tags, hctx_idx); in blk_mq_alloc_rqs()
3870 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
3878 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], in blk_mq_exit_hctx()
3881 set->ops->exit_request(set, flush_rq, hctx_idx); in blk_mq_exit_hctx()
3884 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
3886 xa_erase(&q->hctx_table, hctx_idx); in blk_mq_exit_hctx()
3909 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
3911 hctx->queue_num = hctx_idx; in blk_mq_init_hctx()
3913 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
3916 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
3919 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
3923 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) in blk_mq_init_hctx()
3930 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); in blk_mq_init_hctx()
3933 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
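The blk_mq_exit_hctx()/blk_mq_init_hctx() hits show setup and teardown kept symmetric: init runs the driver's ->init_hctx() callback, initialises the flush request for this hctx_idx, and inserts the hctx into q->hctx_table, while each failure path unwinds only the steps that already succeeded, in reverse order, with the same ->exit_request()/->exit_hctx() calls that blk_mq_exit_hctx() issues on normal teardown. A compact sketch of that goto-unwind idiom, with hypothetical step_a/step_b/step_c and undo_a/undo_b helpers standing in for the three kernel steps:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical setup steps standing in for ->init_hctx(), flush_rq init and
 * the table insert in blk_mq_init_hctx(); each returns false on failure. */
static bool step_a(unsigned int idx) { printf("init_hctx %u\n", idx); return true; }
static bool step_b(unsigned int idx) { printf("init flush rq %u\n", idx); return true; }
static bool step_c(unsigned int idx) { printf("insert into table %u\n", idx); return idx < 4; }

static void undo_b(unsigned int idx) { printf("exit flush rq %u\n", idx); }
static void undo_a(unsigned int idx) { printf("exit_hctx %u\n", idx); }

/* Mirrors the error unwinding visible at the end of blk_mq_init_hctx():
 * only the steps that completed are rolled back, in reverse order. */
static int init_hctx_like(unsigned int idx)
{
	if (!step_a(idx))
		goto fail;
	if (!step_b(idx))
		goto exit_hctx;
	if (!step_c(idx))
		goto exit_flush_rq;
	return 0;

exit_flush_rq:
	undo_b(idx);
exit_hctx:
	undo_a(idx);
fail:
	return -1;
}

int main(void)
{
	printf("ok: %d\n", init_hctx_like(0));   /* all steps succeed */
	printf("err: %d\n", init_hctx_like(9));  /* last step fails, unwinds */
	return 0;
}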
4036 unsigned int hctx_idx, in blk_mq_alloc_map_and_rqs() argument
4042 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); in blk_mq_alloc_map_and_rqs()
4046 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); in blk_mq_alloc_map_and_rqs()
4056 int hctx_idx) in __blk_mq_alloc_map_and_rqs() argument
4059 set->tags[hctx_idx] = set->shared_tags; in __blk_mq_alloc_map_and_rqs()
4064 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, in __blk_mq_alloc_map_and_rqs()
4067 return set->tags[hctx_idx]; in __blk_mq_alloc_map_and_rqs()
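In the __blk_mq_alloc_map_and_rqs() hits, set->tags[hctx_idx] is either aliased to the set-wide shared_tags or filled with a freshly allocated map-plus-requests for this hctx_idx, and the function returns whatever ended up in the slot so callers can test it for failure. A small sketch of that aliasing pattern, using a hypothetical tagset_stub in place of struct blk_mq_tag_set (the shared flag, alloc_tags() helper and fixed depth of 64 are assumptions for the example):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_HW_QUEUES 4

struct tags_stub { unsigned int depth; };

/* Hypothetical stand-in for struct blk_mq_tag_set: either every hctx slot
 * aliases one shared tags object, or each slot owns its own allocation. */
struct tagset_stub {
	bool shared;
	struct tags_stub *shared_tags;
	struct tags_stub *tags[NR_HW_QUEUES];
};

static struct tags_stub *alloc_tags(unsigned int depth)
{
	struct tags_stub *t = malloc(sizeof(*t));

	if (t)
		t->depth = depth;
	return t;
}

/* Mirrors the shape of __blk_mq_alloc_map_and_rqs(): alias the shared tags
 * when the set is shared, otherwise allocate per-hctx; return the slot. */
static struct tags_stub *get_tags_for_hctx(struct tagset_stub *set,
					   unsigned int hctx_idx)
{
	if (set->shared) {
		set->tags[hctx_idx] = set->shared_tags;
		return set->tags[hctx_idx];
	}

	set->tags[hctx_idx] = alloc_tags(64);
	return set->tags[hctx_idx];
}

int main(void)
{
	struct tagset_stub set = { .shared = true, .shared_tags = alloc_tags(64) };

	for (unsigned int i = 0; i < NR_HW_QUEUES; i++)
		printf("hctx %u tags %p\n", i, (void *)get_tags_for_hctx(&set, i));
	return 0;
}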
4072 unsigned int hctx_idx) in blk_mq_free_map_and_rqs() argument
4075 blk_mq_free_rqs(set, tags, hctx_idx); in blk_mq_free_map_and_rqs()
4081 unsigned int hctx_idx) in __blk_mq_free_map_and_rqs() argument
4084 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); in __blk_mq_free_map_and_rqs()
4086 set->tags[hctx_idx] = NULL; in __blk_mq_free_map_and_rqs()
4091 unsigned int j, hctx_idx; in blk_mq_map_swqueue() local
4117 hctx_idx = set->map[j].mq_map[i]; in blk_mq_map_swqueue()
4119 if (!set->tags[hctx_idx] && in blk_mq_map_swqueue()
4120 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { in blk_mq_map_swqueue()
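The blk_mq_map_swqueue() hits show the CPU-to-hctx mapping being read from set->map[j].mq_map[i] and, when the target hctx has no tags yet, tags being allocated on demand through __blk_mq_alloc_map_and_rqs(). A rough sketch of that lazy, map-driven allocation, with a hypothetical per-CPU mq_map[] array, a tags_allocated[] flag array standing in for set->tags[], and an ensure_tags() helper that is not a kernel function:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS       8
#define NR_HW_QUEUES  4

/* Hypothetical CPU -> hctx index map, playing the role of set->map[j].mq_map[]. */
static const unsigned int mq_map[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

/* One flag per hctx standing in for set->tags[hctx_idx] being populated. */
static bool tags_allocated[NR_HW_QUEUES];

/* Plays the role of __blk_mq_alloc_map_and_rqs(): allocate only if missing. */
static bool ensure_tags(unsigned int hctx_idx)
{
	if (!tags_allocated[hctx_idx]) {
		printf("allocating tags for hctx %u\n", hctx_idx);
		tags_allocated[hctx_idx] = true;
	}
	return tags_allocated[hctx_idx];
}

int main(void)
{
	/* Walk every CPU the way blk_mq_map_swqueue() does and make sure the
	 * hctx it maps to has request tags before it can be used. */
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int hctx_idx = mq_map[cpu];

		if (!ensure_tags(hctx_idx))
			return 1;
		printf("cpu %u -> hctx %u\n", cpu, hctx_idx);
	}
	return 0;
}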
4431 int hctx_idx, int node) in blk_mq_alloc_and_init_hctx() argument
4452 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) in blk_mq_alloc_and_init_hctx()