/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
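
/*
 * Illustrative pairing of the helpers above (a sketch, not verbatim kernel
 * code): the tag map is allocated first, then backed with requests, and torn
 * down in reverse order on failure. Identifiers such as "set", "hctx_idx"
 * and "flags" are assumed to come from the caller.
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *				   set->reserved_tags, flags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth) < 0) {
 *		blk_mq_free_rq_map(tags, flags);
 *		return -ENOMEM;
 *	}
 */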

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_HIPRI is set.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
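
/*
 * Typical submission-side use of the mapping helpers above (an illustrative
 * sketch only; the authoritative code lives in blk-mq.c): the per-CPU
 * software queue is looked up first, then the command flags select one of
 * its cached hardware queues. "data" is assumed to be a
 * struct blk_mq_alloc_data (defined below).
 *
 *	data->ctx = blk_mq_get_ctx(q);
 *	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
 */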

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
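
/*
 * The two checks above are commonly made before touching a hardware queue
 * (an illustrative sketch; the real run-queue paths in blk-mq.c apply them
 * at slightly different points):
 *
 *	if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *		return;
 */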

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return true;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
	else
		atomic_dec(&hctx->nr_active);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}
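
/*
 * The dispatch-budget helpers above are normally used in a get/put pattern
 * around an attempt to take a driver tag (an illustrative sketch only; the
 * real dispatch paths live in blk-mq.c, and blk_mq_get_driver_tag() is
 * assumed here rather than declared in this header):
 *
 *	if (!blk_mq_get_dispatch_budget(q))
 *		return false;
 *	if (!blk_mq_get_driver_tag(rq)) {
 *		blk_mq_put_dispatch_budget(q);
 *		return false;
 *	}
 */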

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the
 * target request queue is for a zoned block device and the BIO to plug is a
 * write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

#endif