/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned short index_hw[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long rq_dispatched[2];
	unsigned long rq_merged;

	/* incremented at completion time */
	unsigned long ____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue *queue;
	struct blk_mq_ctxs *ctxs;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
				       struct request *rq,
				       blk_qc_t *cookie,
				       bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,cpu) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     unsigned int cpu)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	if ((flags & REQ_HIPRI) &&
	    q->tag_set->nr_maps > HCTX_TYPE_POLL &&
	    q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
	    test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		type = HCTX_TYPE_POLL;
	else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
		 q->tag_set->nr_maps > HCTX_TYPE_READ &&
		 q->tag_set->map[HCTX_TYPE_READ].nr_queues)
		type = HCTX_TYPE_READ;

	return blk_mq_map_queue_type(q, type, cpu);
}
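/*
 * Illustrative sketch, not part of this header: a caller on the submission
 * path would typically resolve the hardware queue for the current CPU from
 * the request's command flags, roughly:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, cmd_flags, ctx->cpu);
 *
 * A REQ_HIPRI request is steered to the poll map when one is configured and
 * polling is enabled on the queue; reads prefer a dedicated read map when
 * one is present; everything else falls back to HCTX_TYPE_DEFAULT.
 */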
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
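/*
 * Illustrative sketch, not part of this header: blk_mq_get_ctx() disables
 * preemption via get_cpu(), so every call must be paired with a
 * blk_mq_put_ctx() once the caller is done with the ctx:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... use ctx, e.g. queue a request on ctx->rq_lists ...
 *	blk_mq_put_ctx(ctx);
 */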
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

#endif