/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
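
/*
 * Illustrative pairing of the helpers above (a sketch of the expected call
 * order, not a copy of the real setup code): a tag map is created with
 * blk_mq_alloc_rq_map() and then populated via blk_mq_alloc_rqs(); teardown
 * happens in reverse with blk_mq_free_rqs() followed by blk_mq_free_rq_map().
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, nr_tags, reserved_tags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, nr_tags) < 0) {
 *		blk_mq_free_rq_map(tags);
 *		return -ENOMEM;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */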

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}
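
/*
 * Usage sketch for blk_mq_map_queue_type() (illustrative only): when the
 * software queues are (re)mapped, each (type, cpu) pair is resolved to the
 * hardware queue that will service it, roughly:
 *
 *	for (type = 0; type < HCTX_MAX_TYPES; type++)
 *		ctx->hctxs[type] = blk_mq_map_queue_type(q, type, cpu);
 */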

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_HIPRI is set, polling is enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on the ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * With an I/O scheduler attached, request tags are allocated from the
 * scheduler tags; otherwise they come from the hardware queue's driver tags.
 */
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
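
/*
 * Typical use of the two helpers above (a sketch, not the exact run-queue
 * code): a hardware queue is skipped when it has been stopped or was never
 * mapped to any CPU.
 *
 *	if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *		return;
 */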

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return true;
}
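
/*
 * Sketch of how the dispatch budget helpers above are commonly paired
 * (illustrative only; get_next_request() is a stand-in, not a real helper):
 * a budget is taken before trying to issue a request and given back when
 * nothing ends up being dispatched.
 *
 *	if (!blk_mq_get_dispatch_budget(q))
 *		return;
 *	rq = get_next_request();
 *	if (!rq) {
 *		blk_mq_put_dispatch_budget(q);
 *		return;
 *	}
 */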

/*
 * Release the driver tag of @rq back to @hctx, and drop the active request
 * accounting used when the tag set is shared.
 */
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the BIO can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}

#endif