Lines Matching +full:input +full:- +full:depth (search results over block/blk-mq.h)

1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/blk-mq.h>
6 #include "blk-stat.h"
17 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
35 BLK_MQ_NO_TAG = -1U,
37 BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
67 unsigned int hctx_idx, unsigned int depth);
73 * CPU -> queue mappings
78 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
87 return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]); in blk_mq_map_queue_type()
105 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
112 return ctx->hctxs[blk_mq_get_hctx_type(opf)]; in blk_mq_map_queue()
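
blk_mq_map_queue_type() above does nothing more than read
q->tag_set->map[type].mq_map[cpu] and look the resulting index up in
q->hctx_table, so the CPU-to-hctx mapping is whatever the driver's
->map_queues callback wrote into mq_map[]. A minimal sketch of such a
callback for a hypothetical driver follows (the names are illustrative,
not taken from this file); most drivers simply call the
blk_mq_map_queues() helper on each map instead of open-coding the loop.

#include <linux/blk-mq.h>
#include <linux/cpumask.h>

/*
 * Hypothetical ->map_queues: spread CPUs round-robin over the hardware
 * queues of the default map. blk_mq_map_queue_type() later reads
 * mq_map[cpu] back as the hctx index.
 */
static void sketch_map_queues(struct blk_mq_tag_set *set)
{
        struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = qmap->queue_offset +
                                    cpu % qmap->nr_queues;
}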
118 * did. Additionally, this is a per-hw queue depth.
123 return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ); in blk_mq_default_nr_requests()
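
blk_mq_default_nr_requests() above (line 123) doubles the smaller of the
hardware queue depth and BLKDEV_DEFAULT_RQ (128 in include/linux/blkdev.h),
and, per the comment fragment at line 118, the result is a per-hw-queue
depth. A standalone userspace re-implementation of just that arithmetic,
for illustration:

#include <stdio.h>

#define BLKDEV_DEFAULT_RQ 128   /* same value as include/linux/blkdev.h */

static unsigned int default_nr_requests(unsigned int queue_depth)
{
        unsigned int lo = queue_depth < BLKDEV_DEFAULT_RQ ?
                          queue_depth : BLKDEV_DEFAULT_RQ;

        return 2 * lo;
}

int main(void)
{
        printf("%u\n", default_nr_requests(32));   /* 64: shallow device  */
        printf("%u\n", default_nr_requests(1024)); /* 256: capped at 2*128 */
        return 0;
}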
146 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
150 * This assumes per-cpu software queueing queues. They could be per-node
151 * as well, for instance. For now this is hardcoded as-is. Note that we don't
161 /* input parameter */
172 /* input & output parameter */
202 return &bt->ws[0]; in bt_wait_ptr()
203 return sbq_wait_ptr(bt, &hctx->wait_index); in bt_wait_ptr()
211 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_busy()
217 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_tag_idle()
224 return tag < tags->nr_reserved_tags; in blk_mq_tag_is_reserved()
234 if (data->rq_flags & RQF_SCHED_TAGS) in blk_mq_tags_from_data()
235 return data->hctx->sched_tags; in blk_mq_tags_from_data()
236 return data->hctx->tags; in blk_mq_tags_from_data()
242 if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state))) in blk_mq_hctx_stopped()
254 return test_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_hctx_stopped()
259 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
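
blk_mq_hctx_stopped() above tests BLK_MQ_S_STOPPED, which a driver
typically sets by calling blk_mq_stop_hw_queue() when its submission ring
is full and clears again from its completion path with
blk_mq_start_stopped_hw_queues(). A rough sketch of that pattern for a
hypothetical driver (struct sketch_ring and the driver_data layout are
made up for illustration):

#include <linux/blk-mq.h>

/* Toy submission ring; a real driver has its own per-hctx structure. */
struct sketch_ring {
        unsigned int head, tail, size;
};

static bool sketch_ring_full(const struct sketch_ring *ring)
{
        return ring->tail - ring->head == ring->size;
}

static blk_status_t sketch_busy_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
{
        struct sketch_ring *ring = hctx->driver_data;   /* set in ->init_hctx */

        if (sketch_ring_full(ring)) {
                blk_mq_stop_hw_queue(hctx);     /* sets BLK_MQ_S_STOPPED */
                return BLK_STS_DEV_RESOURCE;    /* blk-mq will retry later */
        }
        /* ... place bd->rq in the ring and ring the doorbell ... */
        return BLK_STS_OK;
}

static void sketch_completion_done(struct request_queue *q)
{
        /* Ring has room again: clear STOPPED and rerun the stopped queues. */
        blk_mq_start_stopped_hw_queues(q, true);
}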
267 if (q->mq_ops->put_budget) in blk_mq_put_dispatch_budget()
268 q->mq_ops->put_budget(q, budget_token); in blk_mq_put_dispatch_budget()
273 if (q->mq_ops->get_budget) in blk_mq_get_dispatch_budget()
274 return q->mq_ops->get_budget(q); in blk_mq_get_dispatch_budget()
283 if (rq->q->mq_ops->set_rq_budget_token) in blk_mq_set_rq_budget_token()
284 rq->q->mq_ops->set_rq_budget_token(rq, token); in blk_mq_set_rq_budget_token()
289 if (rq->q->mq_ops->get_rq_budget_token) in blk_mq_get_rq_budget_token()
290 return rq->q->mq_ops->get_rq_budget_token(rq); in blk_mq_get_rq_budget_token()
291 return -1; in blk_mq_get_rq_budget_token()
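
The budget wrappers above only call into the driver when the corresponding
blk_mq_ops hooks are set; drivers with no dispatch-budget concept leave
them NULL and dispatch is never throttled here. A rough driver-side sketch
for a hypothetical device that caps in-flight commands, modelled loosely on
how SCSI uses these hooks (struct sketch_dev and the single-pool token
scheme are made up):

#include <linux/blk-mq.h>
#include <linux/atomic.h>

struct sketch_dev {
        atomic_t inflight;
        int      max_inflight;
};

/* ->get_budget: non-negative token on success, negative means "no budget". */
static int sketch_get_budget(struct request_queue *q)
{
        struct sketch_dev *dev = q->queuedata;

        if (atomic_inc_return(&dev->inflight) > dev->max_inflight) {
                atomic_dec(&dev->inflight);
                return -1;
        }
        return 0;               /* single budget pool, token value unused */
}

/* ->put_budget: hand the budget back on requeue or completion. */
static void sketch_put_budget(struct request_queue *q, int budget_token)
{
        struct sketch_dev *dev = q->queuedata;

        atomic_dec(&dev->inflight);
}

The ->set_rq_budget_token()/->get_rq_budget_token() pair lets the token
travel with the request so it can be handed back from the completion or
requeue path; with a single shared pool like this sketch, the token value
itself carries no information.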
297 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_add_active_requests()
298 atomic_add(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_add_active_requests()
300 atomic_add(val, &hctx->nr_active); in __blk_mq_add_active_requests()
311 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_sub_active_requests()
312 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags); in __blk_mq_sub_active_requests()
314 atomic_sub(val, &hctx->nr_active); in __blk_mq_sub_active_requests()
325 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_add_active_requests()
331 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_inc_active_requests()
338 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_sub_active_requests()
344 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) in blk_mq_dec_active_requests()
350 if (blk_mq_is_shared_tags(hctx->flags)) in __blk_mq_active_requests()
351 return atomic_read(&hctx->queue->nr_active_requests_shared_tags); in __blk_mq_active_requests()
352 return atomic_read(&hctx->nr_active); in __blk_mq_active_requests()
358 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag); in __blk_mq_put_driver_tag()
359 rq->tag = BLK_MQ_NO_TAG; in __blk_mq_put_driver_tag()
364 if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG) in blk_mq_put_driver_tag()
367 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
374 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) in blk_mq_get_driver_tag()
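
rq->internal_tag and rq->tag index two different tag spaces: with an I/O
scheduler the request is allocated against hctx->sched_tags (see
blk_mq_tags_from_data() above) and only receives a driver tag from
hctx->tags at dispatch time through blk_mq_get_driver_tag(); BLK_MQ_NO_TAG
from line 35 marks the unassigned state. A hypothetical ->queue_rq()
showing the usual way the driver tag is then consumed (struct sketch_cmd
and the driver_data layout are illustrative only):

#include <linux/blk-mq.h>

struct sketch_cmd {
        u64 lba;
        u32 len;
};

static blk_status_t sketch_tagged_queue_rq(struct blk_mq_hw_ctx *hctx,
                                           const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct sketch_cmd *cmds = hctx->driver_data;    /* one slot per tag */

        /* By now rq->tag is a valid index into hctx->tags, not BLK_MQ_NO_TAG. */
        blk_mq_start_request(rq);
        cmds[rq->tag].lba = blk_rq_pos(rq);
        cmds[rq->tag].len = blk_rq_bytes(rq);
        /* ... submit slot rq->tag to the hardware ... */
        return BLK_STS_OK;
}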
385 qmap->mq_map[cpu] = 0; in blk_mq_clear_mq_map()
392 struct request *rq = list_entry_rq(list->next); in blk_mq_free_requests()
394 list_del_init(&rq->queuelist); in blk_mq_free_requests()
401 * and attempt to provide a fair share of the tag depth for each of them.
406 unsigned int depth, users; in hctx_may_queue() local
408 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) in hctx_may_queue()
414 if (bt->sb.depth == 1) in hctx_may_queue()
417 if (blk_mq_is_shared_tags(hctx->flags)) { in hctx_may_queue()
418 struct request_queue *q = hctx->queue; in hctx_may_queue()
420 if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) in hctx_may_queue()
423 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
427 users = READ_ONCE(hctx->tags->active_queues); in hctx_may_queue()
434 depth = max((bt->sb.depth + users - 1) / users, 4U); in hctx_may_queue()
435 return __blk_mq_active_requests(hctx) < depth; in hctx_may_queue()
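
With the 4U floor, the calculation at line 434 gives each active user of a
shared tag set roughly ceil(depth / users) tags, and line 435 refuses new
allocations once that share is reached. A standalone userspace illustration
of the arithmetic:

#include <stdio.h>

static unsigned int fair_share(unsigned int total_depth, unsigned int users)
{
        unsigned int depth = (total_depth + users - 1) / users;

        return depth > 4 ? depth : 4;   /* allow at least a few tags each */
}

int main(void)
{
        printf("%u\n", fair_share(64, 5));  /* ceil(64/5)  = 13 */
        printf("%u\n", fair_share(64, 32)); /* ceil(64/32) = 2, clamped to 4 */
        return 0;
}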
441 if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
442 struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
446 srcu_idx = srcu_read_lock(__tag_set->srcu); \
448 srcu_read_unlock(__tag_set->srcu, srcu_idx); \
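
The macro above takes the SRCU path only when the tag set carries
BLK_MQ_F_BLOCKING, because such a driver's ->queue_rq() may sleep and a
plain RCU read-side section would not permit that. A minimal sketch of how
a hypothetical blocking driver would configure its tag set (the field
values are illustrative):

#include <linux/blk-mq.h>
#include <linux/numa.h>
#include <linux/string.h>

static int sketch_setup_tag_set(struct blk_mq_tag_set *set,
                                const struct blk_mq_ops *ops)
{
        memset(set, 0, sizeof(*set));
        set->ops = ops;
        set->nr_hw_queues = 1;
        set->queue_depth = 64;
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_BLOCKING; /* ->queue_rq() may sleep */

        return blk_mq_alloc_tag_set(set);
}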
461 return (q->limits.features & BLK_FEAT_POLL) && in blk_mq_can_poll()
462 q->tag_set->map[HCTX_TYPE_POLL].nr_queues; in blk_mq_can_poll()
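
blk_mq_can_poll() above is only true when, in addition to the queue
advertising BLK_FEAT_POLL in its limits, the driver registered a separate
HCTX_TYPE_POLL map with a non-zero nr_queues. A hypothetical helper, called
from a driver's ->map_queues, that carves nr_poll_queues out of the
hardware queues for polling might look like this (the split is illustrative
only; set->nr_maps must already cover HCTX_TYPE_POLL):

#include <linux/blk-mq.h>

static void sketch_map_with_poll(struct blk_mq_tag_set *set,
                                 unsigned int nr_poll_queues)
{
        struct blk_mq_queue_map *def = &set->map[HCTX_TYPE_DEFAULT];
        struct blk_mq_queue_map *poll = &set->map[HCTX_TYPE_POLL];

        def->nr_queues = set->nr_hw_queues - nr_poll_queues;
        def->queue_offset = 0;
        blk_mq_map_queues(def);

        /* Poll hctxs are placed after the default ones. */
        poll->nr_queues = nr_poll_queues;
        poll->queue_offset = def->nr_queues;
        blk_mq_map_queues(poll);
}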