// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return xa_load(&q->hctx_table,
			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}
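/*
 * Illustrative sketch (not part of the kernel source): the poll cookie
 * packs the hardware queue number into the upper bits and the driver or
 * scheduler tag into the low BLK_QC_T_SHIFT bits, so encoding and decoding
 * round-trip. For a request on hctx 2 carrying internal_tag 5:
 *
 *	qc = (2 << BLK_QC_T_SHIFT) | 5 | BLK_QC_T_INTERNAL;
 *	hctx = blk_qc_to_hctx(q, qc);	// hctx_table entry 2
 *	rq = blk_qc_to_rq(hctx, qc);	// sched_tags lookup of tag 5
 */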
/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
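/*
 * Illustrative sketch (hypothetical driver, not part of this file): freeze
 * and unfreeze must be balanced, and the queue rejects new I/O while the
 * freeze depth is non-zero. A driver changing a queue limit might do:
 *
 *	blk_mq_freeze_queue(q);		// drain all in-flight requests
 *	blk_queue_logical_block_size(q, new_lbs);
 *	blk_mq_unfreeze_queue(q);	// resume accepting I/O
 */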
/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
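/*
 * Illustrative sketch (hypothetical driver, not part of this file): unlike
 * freezing, quiescing only stops dispatch to ->queue_rq(); requests may
 * still be inserted and in-flight requests still complete. A driver pausing
 * dispatch around an error-recovery step might do:
 *
 *	blk_mq_quiesce_queue(q);	// no ->queue_rq() calls after this
 *	my_drv_reset_hardware(dev);	// hypothetical recovery step
 *	blk_mq_unquiesce_queue(q);	// reruns queues to flush backlog
 */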
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}
static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx. In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
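/*
 * Illustrative sketch (not part of this file): blk_mq_alloc_request()
 * returns an ERR_PTR() on failure, never NULL, so callers check with
 * IS_ERR(). A passthrough user might do:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */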
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for the rare use case of a command tied to a specific
	 * queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not, tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}
void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}
/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	req->bio = NULL;
	req->__data_len = 0;
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *     The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *     except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
	    !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
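/*
 * Illustrative sketch (not part of this file): a stacking driver can retire
 * a request in slices. For a 16KiB request built from four 4KiB bios,
 * completing 8KiB ends the first two bios and leaves the request live:
 *
 *	if (blk_update_request(req, BLK_STS_OK, 8192))
 *		return;				// 8KiB still outstanding
 *	__blk_mq_end_request(req, BLK_STS_OK);	// everything done
 */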
static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion. flush_rq isn't accounted as a
	 * normal IO on queueing nor completion. Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (rq->bio)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
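/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * blk_mq_end_request() is the all-at-once completion used by most drivers,
 * e.g. from a completion handler:
 *
 *	static void my_drv_finish(struct my_drv_cmd *cmd)	// hypothetical
 *	{
 *		struct request *rq = blk_mq_rq_from_pdu(cmd);
 *
 *		blk_mq_end_request(rq, cmd->ok ? BLK_STS_OK : BLK_STS_IOERR);
 *	}
 */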
#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain? Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
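/*
 * Illustrative sketch (hypothetical driver, not part of this file): a poll
 * loop can gather completions into an io_comp_batch and retire them with a
 * single blk_mq_end_request_batch() call, amortizing the tag-free and
 * accounting work:
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *
 *	while (my_drv_reap_cqe(dev, &rq))	// hypothetical CQ reaping
 *		rq_list_add(&iob.req_list, rq);
 *	blk_mq_end_request_batch(&iob);
 */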
/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);
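/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * canonical issue/complete pairing in a driver's ->queue_rq():
 *
 *	static blk_status_t my_drv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					    const struct blk_mq_queue_data *bd)
 *	{
 *		blk_mq_start_request(bd->rq);	// arms the timeout timer
 *		if (my_drv_submit(bd->rq))	// hypothetical hw submit
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 *
 * The matching interrupt handler later calls blk_mq_complete_request().
 */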
/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
		rq_end_io_fn *done, bool use_plug)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io = done;

	blk_account_io_start(rq);

	if (use_plug && current->plug) {
		blk_add_rq_to_plug(current->plug, rq);
		return;
	}
	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution. Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
{
	__blk_execute_rq_nowait(rq, at_head, done, true);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	/*
	 * iopoll requires request to be submitted to driver, so can't
	 * use plug
	 */
	rq->end_io_data = &wait;
	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
			!blk_rq_is_poll(rq));

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
				hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
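/*
 * Illustrative sketch (not part of this file): blk_execute_rq() is the
 * synchronous flavor used for passthrough commands, pairing naturally with
 * blk_mq_alloc_request():
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	blk_status_t status;
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	status = blk_execute_rq(rq, false);	// sleeps until completion
 *	blk_mq_free_request(rq);
 *	return blk_status_to_errno(status);
 */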
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, the request already contains
		 * driver-specific data, so insert it into the hctx dispatch
		 * list to avoid any merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv,
			       bool reserved)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing; that
	 * makes the expire check reliable. If the request is not expired,
	 * then it was completed and reallocated as a new request after
	 * returning from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}
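/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver's ->timeout() callback either finishes the request itself and
 * returns BLK_EH_DONE, or asks for the timer to be re-armed:
 *
 *	static enum blk_eh_timer_return my_drv_timeout(struct request *rq,
 *						       bool reserved)
 *	{
 *		if (my_drv_still_running(rq))	// hypothetical hw check
 *			return BLK_EH_RESET_TIMER;
 *		blk_mq_complete_request(rq);
 *		return BLK_EH_DONE;
 *	}
 */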
struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}
static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
	    !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - a weight (7/8 and 1/8) is applied so that the value decays exponentially
 * - 4 is taken as the factor to avoid rounding the result down to 0; the
 *   exact factor doesn't matter much because EWMA decays exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
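/*
 * Worked example (illustrative, not part of this file): starting from
 * dispatch_busy == 0, each busy update computes
 * ewma = (ewma * 7 + 16) / 8, so successive busy dispatches yield
 * 2, 3, 4, 5, ... converging toward 16; each non-busy update computes
 * ewma = ewma * 7 / 8, decaying back toward 0.
 */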
#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	int budget_token = -1;

	if (need_budget) {
		budget_token = blk_mq_get_dispatch_budget(rq->q);
		if (budget_token < 0) {
			blk_mq_put_driver_tag(rq);
			return PREP_DISPATCH_NO_BUDGET;
		}
		blk_mq_set_rq_budget_token(rq, budget_token);
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * All budgets not got from this function will be put
			 * together during handling partial dispatch
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q, budget_token);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		struct list_head *list)
{
	struct request *rq;

	list_for_each_entry(rq, list, queuelist) {
		int budget_token = blk_mq_get_rq_budget_token(rq);

		if (budget_token >= 0)
			blk_mq_put_dispatch_budget(q, budget_token);
	}
}
/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);
	bool needs_resource = false;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * once the request is queued to lld, no need to cover the
		 * budget any more
		 */
		if (nr_budgets)
			nr_budgets--;
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
			needs_resource = true;
			fallthrough;
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
			needs_resource = true;
			break;
		default:
			errors++;
			blk_mq_end_request(rq, ret);
		}
	} while (!list_empty(list));
out:
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);

		if (nr_budgets)
			blk_mq_release_budgets(q, list);

		spin_lock(&hctx->lock);
		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * Order adding requests to hctx->dispatch and checking
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(). This prevents the restart code
		 * path from missing the newly added requests on
		 * hctx->dispatch while SCHED_RESTART is observed here.
		 */
		smp_mb();

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART
		 * bit is set, run the queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle. We'll do
		 * similar if we couldn't get budget or couldn't lock a zone
		 * and SCHED_RESTART is set.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (prep == PREP_DISPATCH_NO_BUDGET)
			needs_resource = true;
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && needs_resource)
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	return (queued + errors) != 0;
}
2043 */ 2044 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 2045 { 2046 bool tried = false; 2047 int next_cpu = hctx->next_cpu; 2048 2049 if (hctx->queue->nr_hw_queues == 1) 2050 return WORK_CPU_UNBOUND; 2051 2052 if (--hctx->next_cpu_batch <= 0) { 2053 select_cpu: 2054 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, 2055 cpu_online_mask); 2056 if (next_cpu >= nr_cpu_ids) 2057 next_cpu = blk_mq_first_mapped_cpu(hctx); 2058 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2059 } 2060 2061 /* 2062 * Schedule unbound if we can't find an online CPU for this hctx; 2063 * this should only happen while handling CPU DEAD. 2064 */ 2065 if (!cpu_online(next_cpu)) { 2066 if (!tried) { 2067 tried = true; 2068 goto select_cpu; 2069 } 2070 2071 /* 2072 * Make sure to re-select the CPU next time, once the CPUs 2073 * in hctx->cpumask come online again. 2074 */ 2075 hctx->next_cpu = next_cpu; 2076 hctx->next_cpu_batch = 1; 2077 return WORK_CPU_UNBOUND; 2078 } 2079 2080 hctx->next_cpu = next_cpu; 2081 return next_cpu; 2082 } 2083 2084 /** 2085 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue. 2086 * @hctx: Pointer to the hardware queue to run. 2087 * @async: If we want to run the queue asynchronously. 2088 * @msecs: Milliseconds of delay to wait before running the queue. 2089 * 2090 * If !@async, try to run the queue now. Else, run the queue asynchronously 2091 * with a delay of @msecs. 2092 */ 2093 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, 2094 unsigned long msecs) 2095 { 2096 if (unlikely(blk_mq_hctx_stopped(hctx))) 2097 return; 2098 2099 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 2100 int cpu = get_cpu(); 2101 if (cpumask_test_cpu(cpu, hctx->cpumask)) { 2102 __blk_mq_run_hw_queue(hctx); 2103 put_cpu(); 2104 return; 2105 } 2106 2107 put_cpu(); 2108 } 2109 2110 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 2111 msecs_to_jiffies(msecs)); 2112 } 2113 2114 /** 2115 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. 2116 * @hctx: Pointer to the hardware queue to run. 2117 * @msecs: Milliseconds of delay to wait before running the queue. 2118 * 2119 * Run a hardware queue asynchronously with a delay of @msecs. 2120 */ 2121 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 2122 { 2123 __blk_mq_delay_run_hw_queue(hctx, true, msecs); 2124 } 2125 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 2126 2127 /** 2128 * blk_mq_run_hw_queue - Start to run a hardware queue. 2129 * @hctx: Pointer to the hardware queue to run. 2130 * @async: If we want to run the queue asynchronously. 2131 * 2132 * Check if the request queue is not in a quiesced state and if there are 2133 * pending requests to be sent. If this is true, run the queue to send requests 2134 * to hardware. 2135 */ 2136 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2137 { 2138 bool need_run; 2139 2140 /* 2141 * While the queue is quiesced we may be switching the io scheduler, 2142 * updating nr_hw_queues, or doing other work, and the queue must not 2143 * be run; not even __blk_mq_hctx_has_pending() can be called safely. 2144 * 2145 * The queue will be rerun in blk_mq_unquiesce_queue() if it is 2146 * quiesced.
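 *
 * Illustrative usage from a driver's point of view (editor's sketch,
 * not from this file; foo_reconfigure() is hypothetical):
 *
 *	blk_mq_quiesce_queue(q);
 *	foo_reconfigure(dev);
 *	blk_mq_unquiesce_queue(q);
 *
 * blk_mq_quiesce_queue() guarantees that no ->queue_rq() call is in
 * flight once it returns, and blk_mq_unquiesce_queue() reruns the
 * queues as described above.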
2147 */ 2148 __blk_mq_run_dispatch_ops(hctx->queue, false, 2149 need_run = !blk_queue_quiesced(hctx->queue) && 2150 blk_mq_hctx_has_pending(hctx)); 2151 2152 if (need_run) 2153 __blk_mq_delay_run_hw_queue(hctx, async, 0); 2154 } 2155 EXPORT_SYMBOL(blk_mq_run_hw_queue); 2156 2157 /* 2158 * Is the request queue handled by an IO scheduler that does not respect 2159 * hardware queues when dispatching? 2160 */ 2161 static bool blk_mq_has_sqsched(struct request_queue *q) 2162 { 2163 struct elevator_queue *e = q->elevator; 2164 2165 if (e && e->type->ops.dispatch_request && 2166 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE)) 2167 return true; 2168 return false; 2169 } 2170 2171 /* 2172 * Return the preferred queue to dispatch from (if any) for a non-mq-aware 2173 * IO scheduler. 2174 */ 2175 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2176 { 2177 struct blk_mq_hw_ctx *hctx; 2178 2179 /* 2180 * If the IO scheduler does not respect hardware queues when 2181 * dispatching, we just don't bother with multiple HW queues and 2182 * dispatch from the hctx for the current CPU, since running multiple 2183 * queues just causes lock contention inside the scheduler and 2184 * pointless cache bouncing. 2185 */ 2186 hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, 2187 raw_smp_processor_id()); 2188 if (!blk_mq_hctx_stopped(hctx)) 2189 return hctx; 2190 return NULL; 2191 } 2192 2193 /** 2194 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2195 * @q: Pointer to the request queue to run. 2196 * @async: If we want to run the queue asynchronously. 2197 */ 2198 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2199 { 2200 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2201 unsigned long i; 2202 2203 sq_hctx = NULL; 2204 if (blk_mq_has_sqsched(q)) 2205 sq_hctx = blk_mq_get_sq_hctx(q); 2206 queue_for_each_hw_ctx(q, hctx, i) { 2207 if (blk_mq_hctx_stopped(hctx)) 2208 continue; 2209 /* 2210 * Dispatch from this hctx if there is no hctx preferred by the 2211 * IO scheduler, or if it has requests that bypass the 2212 * scheduler. 2213 */ 2214 if (!sq_hctx || sq_hctx == hctx || 2215 !list_empty_careful(&hctx->dispatch)) 2216 blk_mq_run_hw_queue(hctx, async); 2217 } 2218 } 2219 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2220 2221 /** 2222 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2223 * @q: Pointer to the request queue to run. 2224 * @msecs: Milliseconds of delay to wait before running the queues. 2225 */ 2226 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2227 { 2228 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2229 unsigned long i; 2230 2231 sq_hctx = NULL; 2232 if (blk_mq_has_sqsched(q)) 2233 sq_hctx = blk_mq_get_sq_hctx(q); 2234 queue_for_each_hw_ctx(q, hctx, i) { 2235 if (blk_mq_hctx_stopped(hctx)) 2236 continue; 2237 /* 2238 * If there is already a run_work pending, leave the 2239 * pending delay untouched. Otherwise, an hctx can stall 2240 * if another hctx keeps re-delaying its work 2241 * before the work executes. 2242 */ 2243 if (delayed_work_pending(&hctx->run_work)) 2244 continue; 2245 /* 2246 * Dispatch from this hctx if there is no hctx preferred by the 2247 * IO scheduler, or if it has requests that bypass the 2248 * scheduler.
2249 */ 2250 if (!sq_hctx || sq_hctx == hctx || 2251 !list_empty_careful(&hctx->dispatch)) 2252 blk_mq_delay_run_hw_queue(hctx, msecs); 2253 } 2254 } 2255 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2256 2257 /** 2258 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 2259 * @q: request queue. 2260 * 2261 * The caller is responsible for serializing this function against 2262 * blk_mq_{start,stop}_hw_queue(). 2263 */ 2264 bool blk_mq_queue_stopped(struct request_queue *q) 2265 { 2266 struct blk_mq_hw_ctx *hctx; 2267 unsigned long i; 2268 2269 queue_for_each_hw_ctx(q, hctx, i) 2270 if (blk_mq_hctx_stopped(hctx)) 2271 return true; 2272 2273 return false; 2274 } 2275 EXPORT_SYMBOL(blk_mq_queue_stopped); 2276 2277 /* 2278 * Drivers often use this function to pause .queue_rq() when there 2279 * aren't enough resources or some condition isn't satisfied, in which 2280 * case BLK_STS_RESOURCE is usually returned. 2281 * 2282 * We do not guarantee that dispatch can be drained or blocked 2283 * after blk_mq_stop_hw_queue() returns. Please use 2284 * blk_mq_quiesce_queue() for that requirement. 2285 */ 2286 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2287 { 2288 cancel_delayed_work(&hctx->run_work); 2289 2290 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2291 } 2292 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2293 2294 /* 2295 * Drivers often use this function to pause .queue_rq() when there 2296 * aren't enough resources or some condition isn't satisfied, in which 2297 * case BLK_STS_RESOURCE is usually returned. 2298 * 2299 * We do not guarantee that dispatch can be drained or blocked 2300 * after blk_mq_stop_hw_queues() returns. Please use 2301 * blk_mq_quiesce_queue() for that requirement. 2302 */ 2303 void blk_mq_stop_hw_queues(struct request_queue *q) 2304 { 2305 struct blk_mq_hw_ctx *hctx; 2306 unsigned long i; 2307 2308 queue_for_each_hw_ctx(q, hctx, i) 2309 blk_mq_stop_hw_queue(hctx); 2310 } 2311 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2312 2313 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2314 { 2315 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2316 2317 blk_mq_run_hw_queue(hctx, false); 2318 } 2319 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2320 2321 void blk_mq_start_hw_queues(struct request_queue *q) 2322 { 2323 struct blk_mq_hw_ctx *hctx; 2324 unsigned long i; 2325 2326 queue_for_each_hw_ctx(q, hctx, i) 2327 blk_mq_start_hw_queue(hctx); 2328 } 2329 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2330 2331 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2332 { 2333 if (!blk_mq_hctx_stopped(hctx)) 2334 return; 2335 2336 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2337 blk_mq_run_hw_queue(hctx, async); 2338 } 2339 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2340 2341 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2342 { 2343 struct blk_mq_hw_ctx *hctx; 2344 unsigned long i; 2345 2346 queue_for_each_hw_ctx(q, hctx, i) 2347 blk_mq_start_stopped_hw_queue(hctx, async); 2348 } 2349 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2350 2351 static void blk_mq_run_work_fn(struct work_struct *work) 2352 { 2353 struct blk_mq_hw_ctx *hctx; 2354 2355 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 2356 2357 /* 2358 * If we are stopped, don't run the queue.
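 *
 * Editor's note on the typical stop/start pairing (illustrative, not
 * from this file): a driver stops the queue from ->queue_rq() on a
 * resource shortage,
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	return BLK_STS_RESOURCE;
 *
 * and restarts it from its completion or recovery path once resources
 * are available again:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);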
2359 */ 2360 if (blk_mq_hctx_stopped(hctx)) 2361 return; 2362 2363 __blk_mq_run_hw_queue(hctx); 2364 } 2365 2366 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 2367 struct request *rq, 2368 bool at_head) 2369 { 2370 struct blk_mq_ctx *ctx = rq->mq_ctx; 2371 enum hctx_type type = hctx->type; 2372 2373 lockdep_assert_held(&ctx->lock); 2374 2375 trace_block_rq_insert(rq); 2376 2377 if (at_head) 2378 list_add(&rq->queuelist, &ctx->rq_lists[type]); 2379 else 2380 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 2381 } 2382 2383 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 2384 bool at_head) 2385 { 2386 struct blk_mq_ctx *ctx = rq->mq_ctx; 2387 2388 lockdep_assert_held(&ctx->lock); 2389 2390 __blk_mq_insert_req_list(hctx, rq, at_head); 2391 blk_mq_hctx_mark_pending(hctx, ctx); 2392 } 2393 2394 /** 2395 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2396 * @rq: Pointer to request to be inserted. 2397 * @at_head: true if the request should be inserted at the head of the list. 2398 * @run_queue: If we should run the hardware queue after inserting the request. 2399 * 2400 * Should only be used carefully, when the caller knows we want to 2401 * bypass a potential IO scheduler on the target device. 2402 */ 2403 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, 2404 bool run_queue) 2405 { 2406 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2407 2408 spin_lock(&hctx->lock); 2409 if (at_head) 2410 list_add(&rq->queuelist, &hctx->dispatch); 2411 else 2412 list_add_tail(&rq->queuelist, &hctx->dispatch); 2413 spin_unlock(&hctx->lock); 2414 2415 if (run_queue) 2416 blk_mq_run_hw_queue(hctx, false); 2417 } 2418 2419 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 2420 struct list_head *list) 2421 2422 { 2423 struct request *rq; 2424 enum hctx_type type = hctx->type; 2425 2426 /* 2427 * preemption doesn't flush plug list, so it's possible ctx->cpu is 2428 * offline now 2429 */ 2430 list_for_each_entry(rq, list, queuelist) { 2431 BUG_ON(rq->mq_ctx != ctx); 2432 trace_block_rq_insert(rq); 2433 } 2434 2435 spin_lock(&ctx->lock); 2436 list_splice_tail_init(list, &ctx->rq_lists[type]); 2437 blk_mq_hctx_mark_pending(hctx, ctx); 2438 spin_unlock(&ctx->lock); 2439 } 2440 2441 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued, 2442 bool from_schedule) 2443 { 2444 if (hctx->queue->mq_ops->commit_rqs) { 2445 trace_block_unplug(hctx->queue, *queued, !from_schedule); 2446 hctx->queue->mq_ops->commit_rqs(hctx); 2447 } 2448 *queued = 0; 2449 } 2450 2451 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2452 unsigned int nr_segs) 2453 { 2454 int err; 2455 2456 if (bio->bi_opf & REQ_RAHEAD) 2457 rq->cmd_flags |= REQ_FAILFAST_MASK; 2458 2459 rq->__sector = bio->bi_iter.bi_sector; 2460 blk_rq_bio_prep(rq, bio, nr_segs); 2461 2462 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2463 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2464 WARN_ON_ONCE(err); 2465 2466 blk_account_io_start(rq); 2467 } 2468 2469 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2470 struct request *rq, bool last) 2471 { 2472 struct request_queue *q = rq->q; 2473 struct blk_mq_queue_data bd = { 2474 .rq = rq, 2475 .last = last, 2476 }; 2477 blk_status_t ret; 2478 2479 /* 2480 * For OK queue, we are done. For error, caller may kill it. 2481 * Any other error (busy), just add it to our list as we 2482 * previously would have done. 
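 *
 * In other words, the ->queue_rq() return value is handled as:
 *
 *	BLK_STS_OK		accepted; mark the hctx as not busy
 *	BLK_STS_RESOURCE	unprep and requeue; mark the hctx busy
 *	BLK_STS_DEV_RESOURCE	unprep and requeue; mark the hctx busy
 *	anything else		returned to the caller for completion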
2483 */ 2484 ret = q->mq_ops->queue_rq(hctx, &bd); 2485 switch (ret) { 2486 case BLK_STS_OK: 2487 blk_mq_update_dispatch_busy(hctx, false); 2488 break; 2489 case BLK_STS_RESOURCE: 2490 case BLK_STS_DEV_RESOURCE: 2491 blk_mq_update_dispatch_busy(hctx, true); 2492 __blk_mq_requeue_request(rq); 2493 break; 2494 default: 2495 blk_mq_update_dispatch_busy(hctx, false); 2496 break; 2497 } 2498 2499 return ret; 2500 } 2501 2502 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2503 struct request *rq, 2504 bool bypass_insert, bool last) 2505 { 2506 struct request_queue *q = rq->q; 2507 bool run_queue = true; 2508 int budget_token; 2509 2510 /* 2511 * An RCU or SRCU read lock is needed before checking the quiesced flag. 2512 * 2513 * When the queue is stopped or quiesced, ignore 'bypass_insert' from 2514 * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller, 2515 * so that the driver does not try to dispatch again. 2516 */ 2517 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 2518 run_queue = false; 2519 bypass_insert = false; 2520 goto insert; 2521 } 2522 2523 if ((rq->rq_flags & RQF_ELV) && !bypass_insert) 2524 goto insert; 2525 2526 budget_token = blk_mq_get_dispatch_budget(q); 2527 if (budget_token < 0) 2528 goto insert; 2529 2530 blk_mq_set_rq_budget_token(rq, budget_token); 2531 2532 if (!blk_mq_get_driver_tag(rq)) { 2533 blk_mq_put_dispatch_budget(q, budget_token); 2534 goto insert; 2535 } 2536 2537 return __blk_mq_issue_directly(hctx, rq, last); 2538 insert: 2539 if (bypass_insert) 2540 return BLK_STS_RESOURCE; 2541 2542 blk_mq_sched_insert_request(rq, false, run_queue, false); 2543 2544 return BLK_STS_OK; 2545 } 2546 2547 /** 2548 * blk_mq_try_issue_directly - Try to send a request directly to the device driver. 2549 * @hctx: Pointer to the associated hardware queue. 2550 * @rq: Pointer to request to be sent. 2551 * 2552 * If the device has enough resources to accept a new request now, send the 2553 * request directly to the device driver. Else, insert it into the hctx->dispatch 2554 * queue, so we can try to send it again in the future. Requests inserted into 2555 * this queue have higher priority.
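 *
 * Direct issue falls back to an insert when the hctx is stopped, the
 * queue is quiesced, an elevator is attached (unless the caller forces
 * a bypass), or no dispatch budget or driver tag can be obtained; see
 * __blk_mq_try_issue_directly() above.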
2556 */ 2557 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2558 struct request *rq) 2559 { 2560 blk_status_t ret = 2561 __blk_mq_try_issue_directly(hctx, rq, false, true); 2562 2563 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2564 blk_mq_request_bypass_insert(rq, false, true); 2565 else if (ret != BLK_STS_OK) 2566 blk_mq_end_request(rq, ret); 2567 } 2568 2569 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2570 { 2571 return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last); 2572 } 2573 2574 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule) 2575 { 2576 struct blk_mq_hw_ctx *hctx = NULL; 2577 struct request *rq; 2578 int queued = 0; 2579 int errors = 0; 2580 2581 while ((rq = rq_list_pop(&plug->mq_list))) { 2582 bool last = rq_list_empty(plug->mq_list); 2583 blk_status_t ret; 2584 2585 if (hctx != rq->mq_hctx) { 2586 if (hctx) 2587 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2588 hctx = rq->mq_hctx; 2589 } 2590 2591 ret = blk_mq_request_issue_directly(rq, last); 2592 switch (ret) { 2593 case BLK_STS_OK: 2594 queued++; 2595 break; 2596 case BLK_STS_RESOURCE: 2597 case BLK_STS_DEV_RESOURCE: 2598 blk_mq_request_bypass_insert(rq, false, last); 2599 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2600 return; 2601 default: 2602 blk_mq_end_request(rq, ret); 2603 errors++; 2604 break; 2605 } 2606 } 2607 2608 /* 2609 * If we didn't flush the entire list, we could have told the driver 2610 * there was more coming, but that turned out to be a lie. 2611 */ 2612 if (errors) 2613 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2614 } 2615 2616 static void __blk_mq_flush_plug_list(struct request_queue *q, 2617 struct blk_plug *plug) 2618 { 2619 if (blk_queue_quiesced(q)) 2620 return; 2621 q->mq_ops->queue_rqs(&plug->mq_list); 2622 } 2623 2624 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2625 { 2626 struct blk_mq_hw_ctx *this_hctx = NULL; 2627 struct blk_mq_ctx *this_ctx = NULL; 2628 struct request *requeue_list = NULL; 2629 unsigned int depth = 0; 2630 LIST_HEAD(list); 2631 2632 do { 2633 struct request *rq = rq_list_pop(&plug->mq_list); 2634 2635 if (!this_hctx) { 2636 this_hctx = rq->mq_hctx; 2637 this_ctx = rq->mq_ctx; 2638 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { 2639 rq_list_add(&requeue_list, rq); 2640 continue; 2641 } 2642 list_add_tail(&rq->queuelist, &list); 2643 depth++; 2644 } while (!rq_list_empty(plug->mq_list)); 2645 2646 plug->mq_list = requeue_list; 2647 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2648 blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched); 2649 } 2650 2651 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2652 { 2653 struct request *rq; 2654 2655 if (rq_list_empty(plug->mq_list)) 2656 return; 2657 plug->rq_count = 0; 2658 2659 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2660 struct request_queue *q; 2661 2662 rq = rq_list_peek(&plug->mq_list); 2663 q = rq->q; 2664 2665 /* 2666 * Peek first request and see if we have a ->queue_rqs() hook. 2667 * If we do, we can dispatch the whole plug list in one go. We 2668 * already know at this point that all requests belong to the 2669 * same queue, caller must ensure that's the case. 2670 * 2671 * Since we pass off the full list to the driver at this point, 2672 * we do not increment the active request count for the queue. 2673 * Bypass shared tags for now because of that. 
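 *
 * A ->queue_rqs() implementation consumes what it can and leaves the
 * rest on the list for the fallback below. Editor's sketch (the foo_*
 * helpers are hypothetical):
 *
 *	static void foo_queue_rqs(struct request **rqlist)
 *	{
 *		struct request *rq, *requeue_list = NULL;
 *
 *		while ((rq = rq_list_pop(rqlist))) {
 *			if (!foo_hw_ready(rq)) {
 *				rq_list_add(&requeue_list, rq);
 *				continue;
 *			}
 *			blk_mq_start_request(rq);
 *			foo_hw_submit(rq);
 *		}
 *		*rqlist = requeue_list;
 *	}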
2674 */ 2675 if (q->mq_ops->queue_rqs && 2676 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 2677 blk_mq_run_dispatch_ops(q, 2678 __blk_mq_flush_plug_list(q, plug)); 2679 if (rq_list_empty(plug->mq_list)) 2680 return; 2681 } 2682 2683 blk_mq_run_dispatch_ops(q, 2684 blk_mq_plug_issue_direct(plug, false)); 2685 if (rq_list_empty(plug->mq_list)) 2686 return; 2687 } 2688 2689 do { 2690 blk_mq_dispatch_plug_list(plug, from_schedule); 2691 } while (!rq_list_empty(plug->mq_list)); 2692 } 2693 2694 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2695 struct list_head *list) 2696 { 2697 int queued = 0; 2698 int errors = 0; 2699 2700 while (!list_empty(list)) { 2701 blk_status_t ret; 2702 struct request *rq = list_first_entry(list, struct request, 2703 queuelist); 2704 2705 list_del_init(&rq->queuelist); 2706 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2707 if (ret != BLK_STS_OK) { 2708 if (ret == BLK_STS_RESOURCE || 2709 ret == BLK_STS_DEV_RESOURCE) { 2710 blk_mq_request_bypass_insert(rq, false, 2711 list_empty(list)); 2712 break; 2713 } 2714 blk_mq_end_request(rq, ret); 2715 errors++; 2716 } else 2717 queued++; 2718 } 2719 2720 /* 2721 * If we didn't flush the entire list, we could have told 2722 * the driver there was more coming, but that turned out to 2723 * be a lie. 2724 */ 2725 if ((!list_empty(list) || errors) && 2726 hctx->queue->mq_ops->commit_rqs && queued) 2727 hctx->queue->mq_ops->commit_rqs(hctx); 2728 } 2729 2730 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2731 struct bio *bio, unsigned int nr_segs) 2732 { 2733 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2734 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2735 return true; 2736 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2737 return true; 2738 } 2739 return false; 2740 } 2741 2742 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2743 struct blk_plug *plug, 2744 struct bio *bio, 2745 unsigned int nsegs) 2746 { 2747 struct blk_mq_alloc_data data = { 2748 .q = q, 2749 .nr_tags = 1, 2750 .cmd_flags = bio->bi_opf, 2751 }; 2752 struct request *rq; 2753 2754 if (unlikely(bio_queue_enter(bio))) 2755 return NULL; 2756 2757 if (blk_mq_attempt_bio_merge(q, bio, nsegs)) 2758 goto queue_exit; 2759 2760 rq_qos_throttle(q, bio); 2761 2762 if (plug) { 2763 data.nr_tags = plug->nr_ios; 2764 plug->nr_ios = 1; 2765 data.cached_rq = &plug->cached_rq; 2766 } 2767 2768 rq = __blk_mq_alloc_requests(&data); 2769 if (rq) 2770 return rq; 2771 rq_qos_cleanup(q, bio); 2772 if (bio->bi_opf & REQ_NOWAIT) 2773 bio_wouldblock_error(bio); 2774 queue_exit: 2775 blk_queue_exit(q); 2776 return NULL; 2777 } 2778 2779 static inline struct request *blk_mq_get_cached_request(struct request_queue *q, 2780 struct blk_plug *plug, struct bio **bio, unsigned int nsegs) 2781 { 2782 struct request *rq; 2783 2784 if (!plug) 2785 return NULL; 2786 rq = rq_list_peek(&plug->cached_rq); 2787 if (!rq || rq->q != q) 2788 return NULL; 2789 2790 if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { 2791 *bio = NULL; 2792 return NULL; 2793 } 2794 2795 rq_qos_throttle(q, *bio); 2796 2797 if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) 2798 return NULL; 2799 if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) 2800 return NULL; 2801 2802 rq->cmd_flags = (*bio)->bi_opf; 2803 plug->cached_rq = rq_list_next(rq); 2804 INIT_LIST_HEAD(&rq->queuelist); 2805 return rq; 2806 } 2807 2808 /** 2809 * blk_mq_submit_bio - Create and send a request to block device. 2810 * @bio: Bio pointer. 
2811 * 2812 * Builds up a request structure from @bio and sends it to the device. The 2813 * request may not be queued directly to hardware if: 2814 * * This request can be merged with another one 2815 * * We want to place the request in the plug queue for possible future merging 2816 * * There is an IO scheduler attached to this queue 2817 * 2818 * The request is not queued if there is an error with the bio or during 2819 * request creation. 2820 */ 2821 void blk_mq_submit_bio(struct bio *bio) 2822 { 2823 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2824 struct blk_plug *plug = blk_mq_plug(q, bio); 2825 const int is_sync = op_is_sync(bio->bi_opf); 2826 struct request *rq; 2827 unsigned int nr_segs = 1; 2828 blk_status_t ret; 2829 2830 blk_queue_bounce(q, &bio); 2831 if (blk_may_split(q, bio)) 2832 __blk_queue_split(q, &bio, &nr_segs); 2833 2834 if (!bio_integrity_prep(bio)) 2835 return; 2836 2837 rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); 2838 if (!rq) { 2839 if (!bio) 2840 return; 2841 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 2842 if (unlikely(!rq)) 2843 return; 2844 } 2845 2846 trace_block_getrq(bio); 2847 2848 rq_qos_track(q, rq, bio); 2849 2850 blk_mq_bio_to_request(rq, bio, nr_segs); 2851 2852 ret = blk_crypto_init_request(rq); 2853 if (ret != BLK_STS_OK) { 2854 bio->bi_status = ret; 2855 bio_endio(bio); 2856 blk_mq_free_request(rq); 2857 return; 2858 } 2859 2860 if (op_is_flush(bio->bi_opf)) { 2861 blk_insert_flush(rq); 2862 return; 2863 } 2864 2865 if (plug) 2866 blk_add_rq_to_plug(plug, rq); 2867 else if ((rq->rq_flags & RQF_ELV) || 2868 (rq->mq_hctx->dispatch_busy && 2869 (q->nr_hw_queues == 1 || !is_sync))) 2870 blk_mq_sched_insert_request(rq, false, true, true); 2871 else 2872 blk_mq_run_dispatch_ops(rq->q, 2873 blk_mq_try_issue_directly(rq->mq_hctx, rq)); 2874 } 2875 2876 #ifdef CONFIG_BLK_MQ_STACKING 2877 /** 2878 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2879 * @rq: the request being queued 2880 */ 2881 blk_status_t blk_insert_cloned_request(struct request *rq) 2882 { 2883 struct request_queue *q = rq->q; 2884 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); 2885 blk_status_t ret; 2886 2887 if (blk_rq_sectors(rq) > max_sectors) { 2888 /* 2889 * A SCSI device does not have a good way to report whether 2890 * Write Same/Zero is actually supported. If a device rejects 2891 * a non-read/write command (discard, write same, etc.) the 2892 * low-level device driver will set the relevant queue limit to 2893 * 0 to prevent blk-lib from issuing more of the offending 2894 * operations. Commands queued prior to the queue limit being 2895 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 2896 * errors being propagated to upper layers. 2897 */ 2898 if (max_sectors == 0) 2899 return BLK_STS_NOTSUPP; 2900 2901 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", 2902 __func__, blk_rq_sectors(rq), max_sectors); 2903 return BLK_STS_IOERR; 2904 } 2905 2906 /* 2907 * The queue settings related to segment counting may differ from those 2908 * of the original queue. 2909 */ 2910 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 2911 if (rq->nr_phys_segments > queue_max_segments(q)) { 2912 printk(KERN_ERR "%s: over max segments limit.
(%hu > %hu)\n", 2913 __func__, rq->nr_phys_segments, queue_max_segments(q)); 2914 return BLK_STS_IOERR; 2915 } 2916 2917 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 2918 return BLK_STS_IOERR; 2919 2920 if (blk_crypto_insert_cloned_request(rq)) 2921 return BLK_STS_IOERR; 2922 2923 blk_account_io_start(rq); 2924 2925 /* 2926 * Since we have a scheduler attached on the top device, 2927 * bypass a potential scheduler on the bottom device for 2928 * insert. 2929 */ 2930 blk_mq_run_dispatch_ops(q, 2931 ret = blk_mq_request_issue_directly(rq, true)); 2932 if (ret) 2933 blk_account_io_done(rq, ktime_get_ns()); 2934 return ret; 2935 } 2936 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2937 2938 /** 2939 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2940 * @rq: the clone request to be cleaned up 2941 * 2942 * Description: 2943 * Free all bios in @rq for a cloned request. 2944 */ 2945 void blk_rq_unprep_clone(struct request *rq) 2946 { 2947 struct bio *bio; 2948 2949 while ((bio = rq->bio) != NULL) { 2950 rq->bio = bio->bi_next; 2951 2952 bio_put(bio); 2953 } 2954 } 2955 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2956 2957 /** 2958 * blk_rq_prep_clone - Helper function to setup clone request 2959 * @rq: the request to be setup 2960 * @rq_src: original request to be cloned 2961 * @bs: bio_set that bios for clone are allocated from 2962 * @gfp_mask: memory allocation mask for bio 2963 * @bio_ctr: setup function to be called for each clone bio. 2964 * Returns %0 for success, non %0 for failure. 2965 * @data: private data to be passed to @bio_ctr 2966 * 2967 * Description: 2968 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2969 * Also, pages which the original bios are pointing to are not copied 2970 * and the cloned bios just point same pages. 2971 * So cloned bios must be completed before original bios, which means 2972 * the caller must complete @rq before @rq_src. 2973 */ 2974 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2975 struct bio_set *bs, gfp_t gfp_mask, 2976 int (*bio_ctr)(struct bio *, struct bio *, void *), 2977 void *data) 2978 { 2979 struct bio *bio, *bio_src; 2980 2981 if (!bs) 2982 bs = &fs_bio_set; 2983 2984 __rq_for_each_bio(bio_src, rq_src) { 2985 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, 2986 bs); 2987 if (!bio) 2988 goto free_and_out; 2989 2990 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2991 goto free_and_out; 2992 2993 if (rq->bio) { 2994 rq->biotail->bi_next = bio; 2995 rq->biotail = bio; 2996 } else { 2997 rq->bio = rq->biotail = bio; 2998 } 2999 bio = NULL; 3000 } 3001 3002 /* Copy attributes of the original request to the clone request. */ 3003 rq->__sector = blk_rq_pos(rq_src); 3004 rq->__data_len = blk_rq_bytes(rq_src); 3005 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 3006 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 3007 rq->special_vec = rq_src->special_vec; 3008 } 3009 rq->nr_phys_segments = rq_src->nr_phys_segments; 3010 rq->ioprio = rq_src->ioprio; 3011 3012 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 3013 goto free_and_out; 3014 3015 return 0; 3016 3017 free_and_out: 3018 if (bio) 3019 bio_put(bio); 3020 blk_rq_unprep_clone(rq); 3021 3022 return -ENOMEM; 3023 } 3024 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3025 #endif /* CONFIG_BLK_MQ_STACKING */ 3026 3027 /* 3028 * Steal bios from a request and add them to a bio list. 3029 * The request must not have been partially completed before. 
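 *
 * Illustrative use (editor's sketch): a failover path can steal the
 * bios, fail the original request and resubmit the bios elsewhere:
 *
 *	struct bio_list bios = BIO_EMPTY_LIST;
 *
 *	blk_steal_bios(&bios, rq);
 *	blk_mq_end_request(rq, BLK_STS_IOERR);
 *	(resubmit the bios from 'bios' via another path)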
3030 */ 3031 void blk_steal_bios(struct bio_list *list, struct request *rq) 3032 { 3033 if (rq->bio) { 3034 if (list->tail) 3035 list->tail->bi_next = rq->bio; 3036 else 3037 list->head = rq->bio; 3038 list->tail = rq->biotail; 3039 3040 rq->bio = NULL; 3041 rq->biotail = NULL; 3042 } 3043 3044 rq->__data_len = 0; 3045 } 3046 EXPORT_SYMBOL_GPL(blk_steal_bios); 3047 3048 static size_t order_to_size(unsigned int order) 3049 { 3050 return (size_t)PAGE_SIZE << order; 3051 } 3052 3053 /* Called before freeing the request pool in @tags. */ 3054 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3055 struct blk_mq_tags *tags) 3056 { 3057 struct page *page; 3058 unsigned long flags; 3059 3060 /* There is no need to clear the driver tags' own mapping. */ 3061 if (drv_tags == tags) 3062 return; 3063 3064 list_for_each_entry(page, &tags->page_list, lru) { 3065 unsigned long start = (unsigned long)page_address(page); 3066 unsigned long end = start + order_to_size(page->private); 3067 int i; 3068 3069 for (i = 0; i < drv_tags->nr_tags; i++) { 3070 struct request *rq = drv_tags->rqs[i]; 3071 unsigned long rq_addr = (unsigned long)rq; 3072 3073 if (rq_addr >= start && rq_addr < end) { 3074 WARN_ON_ONCE(req_ref_read(rq) != 0); 3075 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3076 } 3077 } 3078 } 3079 3080 /* 3081 * Wait until all pending iterations are done. 3082 * 3083 * The request references have been cleared and that is guaranteed to 3084 * be observed after the ->lock is released. 3085 */ 3086 spin_lock_irqsave(&drv_tags->lock, flags); 3087 spin_unlock_irqrestore(&drv_tags->lock, flags); 3088 } 3089 3090 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3091 unsigned int hctx_idx) 3092 { 3093 struct blk_mq_tags *drv_tags; 3094 struct page *page; 3095 3096 if (list_empty(&tags->page_list)) 3097 return; 3098 3099 if (blk_mq_is_shared_tags(set->flags)) 3100 drv_tags = set->shared_tags; 3101 else 3102 drv_tags = set->tags[hctx_idx]; 3103 3104 if (tags->static_rqs && set->ops->exit_request) { 3105 int i; 3106 3107 for (i = 0; i < tags->nr_tags; i++) { 3108 struct request *rq = tags->static_rqs[i]; 3109 3110 if (!rq) 3111 continue; 3112 set->ops->exit_request(set, rq, hctx_idx); 3113 tags->static_rqs[i] = NULL; 3114 } 3115 } 3116 3117 blk_mq_clear_rq_mapping(drv_tags, tags); 3118 3119 while (!list_empty(&tags->page_list)) { 3120 page = list_first_entry(&tags->page_list, struct page, lru); 3121 list_del_init(&page->lru); 3122 /* 3123 * Remove the kmemleak object previously allocated in 3124 * blk_mq_alloc_rqs().
3125 */ 3126 kmemleak_free(page_address(page)); 3127 __free_pages(page, page->private); 3128 } 3129 } 3130 3131 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3132 { 3133 kfree(tags->rqs); 3134 tags->rqs = NULL; 3135 kfree(tags->static_rqs); 3136 tags->static_rqs = NULL; 3137 3138 blk_mq_free_tags(tags); 3139 } 3140 3141 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3142 unsigned int hctx_idx) 3143 { 3144 int i; 3145 3146 for (i = 0; i < set->nr_maps; i++) { 3147 unsigned int start = set->map[i].queue_offset; 3148 unsigned int end = start + set->map[i].nr_queues; 3149 3150 if (hctx_idx >= start && hctx_idx < end) 3151 break; 3152 } 3153 3154 if (i >= set->nr_maps) 3155 i = HCTX_TYPE_DEFAULT; 3156 3157 return i; 3158 } 3159 3160 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3161 unsigned int hctx_idx) 3162 { 3163 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3164 3165 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3166 } 3167 3168 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3169 unsigned int hctx_idx, 3170 unsigned int nr_tags, 3171 unsigned int reserved_tags) 3172 { 3173 int node = blk_mq_get_hctx_node(set, hctx_idx); 3174 struct blk_mq_tags *tags; 3175 3176 if (node == NUMA_NO_NODE) 3177 node = set->numa_node; 3178 3179 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 3180 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 3181 if (!tags) 3182 return NULL; 3183 3184 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3185 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3186 node); 3187 if (!tags->rqs) { 3188 blk_mq_free_tags(tags); 3189 return NULL; 3190 } 3191 3192 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3193 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3194 node); 3195 if (!tags->static_rqs) { 3196 kfree(tags->rqs); 3197 blk_mq_free_tags(tags); 3198 return NULL; 3199 } 3200 3201 return tags; 3202 } 3203 3204 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3205 unsigned int hctx_idx, int node) 3206 { 3207 int ret; 3208 3209 if (set->ops->init_request) { 3210 ret = set->ops->init_request(set, rq, hctx_idx, node); 3211 if (ret) 3212 return ret; 3213 } 3214 3215 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3216 return 0; 3217 } 3218 3219 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3220 struct blk_mq_tags *tags, 3221 unsigned int hctx_idx, unsigned int depth) 3222 { 3223 unsigned int i, j, entries_per_page, max_order = 4; 3224 int node = blk_mq_get_hctx_node(set, hctx_idx); 3225 size_t rq_size, left; 3226 3227 if (node == NUMA_NO_NODE) 3228 node = set->numa_node; 3229 3230 INIT_LIST_HEAD(&tags->page_list); 3231 3232 /* 3233 * rq_size is the size of the request plus driver payload, rounded 3234 * to the cacheline size 3235 */ 3236 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3237 cache_line_size()); 3238 left = rq_size * depth; 3239 3240 for (i = 0; i < depth; ) { 3241 int this_order = max_order; 3242 struct page *page; 3243 int to_do; 3244 void *p; 3245 3246 while (this_order && left < order_to_size(this_order - 1)) 3247 this_order--; 3248 3249 do { 3250 page = alloc_pages_node(node, 3251 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3252 this_order); 3253 if (page) 3254 break; 3255 if (!this_order--) 3256 break; 3257 if (order_to_size(this_order) < rq_size) 3258 break; 3259 } while (1); 3260 3261 if (!page) 3262 goto fail; 3263 3264 page->private = this_order; 3265 list_add_tail(&page->lru, &tags->page_list); 3266 3267 p = 
page_address(page); 3268 /* 3269 * Allow kmemleak to scan these pages as they contain pointers 3270 * to additional allocations, such as those made via ops->init_request(). 3271 */ 3272 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3273 entries_per_page = order_to_size(this_order) / rq_size; 3274 to_do = min(entries_per_page, depth - i); 3275 left -= to_do * rq_size; 3276 for (j = 0; j < to_do; j++) { 3277 struct request *rq = p; 3278 3279 tags->static_rqs[i] = rq; 3280 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3281 tags->static_rqs[i] = NULL; 3282 goto fail; 3283 } 3284 3285 p += rq_size; 3286 i++; 3287 } 3288 } 3289 return 0; 3290 3291 fail: 3292 blk_mq_free_rqs(set, tags, hctx_idx); 3293 return -ENOMEM; 3294 } 3295 3296 struct rq_iter_data { 3297 struct blk_mq_hw_ctx *hctx; 3298 bool has_rq; 3299 }; 3300 3301 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) 3302 { 3303 struct rq_iter_data *iter_data = data; 3304 3305 if (rq->mq_hctx != iter_data->hctx) 3306 return true; 3307 iter_data->has_rq = true; 3308 return false; 3309 } 3310 3311 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3312 { 3313 struct blk_mq_tags *tags = hctx->sched_tags ? 3314 hctx->sched_tags : hctx->tags; 3315 struct rq_iter_data data = { 3316 .hctx = hctx, 3317 }; 3318 3319 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3320 return data.has_rq; 3321 } 3322 3323 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3324 struct blk_mq_hw_ctx *hctx) 3325 { 3326 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3327 return false; 3328 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3329 return false; 3330 return true; 3331 } 3332 3333 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3334 { 3335 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3336 struct blk_mq_hw_ctx, cpuhp_online); 3337 3338 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 3339 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 3340 return 0; 3341 3342 /* 3343 * Prevent new requests from being allocated on the current hctx. 3344 * 3345 * The smp_mb__after_atomic() pairs with the implied barrier in 3346 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag 3347 * is seen once we return from the tag allocator. 3348 */ 3349 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3350 smp_mb__after_atomic(); 3351 3352 /* 3353 * Try to grab a reference to the queue and wait for any outstanding 3354 * requests. If we cannot grab a reference, the queue has been 3355 * frozen and there are no requests. 3356 */ 3357 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3358 while (blk_mq_hctx_has_requests(hctx)) 3359 msleep(5); 3360 percpu_ref_put(&hctx->queue->q_usage_counter); 3361 } 3362 3363 return 0; 3364 } 3365 3366 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3367 { 3368 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3369 struct blk_mq_hw_ctx, cpuhp_online); 3370 3371 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3372 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3373 return 0; 3374 } 3375 3376 /* 3377 * 'cpu' is going away. Splice any existing rq_list entries from this 3378 * software queue to the hw queue dispatch list, and ensure that it 3379 * gets run.
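 *
 * This runs as the CPUHP_BLK_MQ_DEAD hotplug callback (registered in
 * blk_mq_init_hctx() below), after the CPU has gone offline and can no
 * longer touch its software queue.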
3380 */ 3381 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3382 { 3383 struct blk_mq_hw_ctx *hctx; 3384 struct blk_mq_ctx *ctx; 3385 LIST_HEAD(tmp); 3386 enum hctx_type type; 3387 3388 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3389 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 3390 return 0; 3391 3392 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3393 type = hctx->type; 3394 3395 spin_lock(&ctx->lock); 3396 if (!list_empty(&ctx->rq_lists[type])) { 3397 list_splice_init(&ctx->rq_lists[type], &tmp); 3398 blk_mq_hctx_clear_pending(hctx, ctx); 3399 } 3400 spin_unlock(&ctx->lock); 3401 3402 if (list_empty(&tmp)) 3403 return 0; 3404 3405 spin_lock(&hctx->lock); 3406 list_splice_tail_init(&tmp, &hctx->dispatch); 3407 spin_unlock(&hctx->lock); 3408 3409 blk_mq_run_hw_queue(hctx, true); 3410 return 0; 3411 } 3412 3413 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3414 { 3415 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3416 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3417 &hctx->cpuhp_online); 3418 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3419 &hctx->cpuhp_dead); 3420 } 3421 3422 /* 3423 * Before freeing the hw queue, clear the flush request reference in 3424 * tags->rqs[] to avoid a potential use-after-free. 3425 */ 3426 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3427 unsigned int queue_depth, struct request *flush_rq) 3428 { 3429 int i; 3430 unsigned long flags; 3431 3432 /* The hw queue may not be mapped yet */ 3433 if (!tags) 3434 return; 3435 3436 WARN_ON_ONCE(req_ref_read(flush_rq) != 0); 3437 3438 for (i = 0; i < queue_depth; i++) 3439 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3440 3441 /* 3442 * Wait until all pending iterations are done. 3443 * 3444 * The request reference has been cleared and that is guaranteed to be 3445 * observed after the ->lock is released.
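 *
 * The empty lock/unlock cycle below is sufficient: any iterator that
 * could still observe the old rqs[] entry holds ->lock while it
 * dereferences requests, so acquiring and releasing the lock here
 * guarantees that those iterations have completed.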
3446 */ 3447 spin_lock_irqsave(&tags->lock, flags); 3448 spin_unlock_irqrestore(&tags->lock, flags); 3449 } 3450 3451 /* hctx->ctxs will be freed in queue's release handler */ 3452 static void blk_mq_exit_hctx(struct request_queue *q, 3453 struct blk_mq_tag_set *set, 3454 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3455 { 3456 struct request *flush_rq = hctx->fq->flush_rq; 3457 3458 if (blk_mq_hw_queue_mapped(hctx)) 3459 blk_mq_tag_idle(hctx); 3460 3461 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3462 set->queue_depth, flush_rq); 3463 if (set->ops->exit_request) 3464 set->ops->exit_request(set, flush_rq, hctx_idx); 3465 3466 if (set->ops->exit_hctx) 3467 set->ops->exit_hctx(hctx, hctx_idx); 3468 3469 blk_mq_remove_cpuhp(hctx); 3470 3471 xa_erase(&q->hctx_table, hctx_idx); 3472 3473 spin_lock(&q->unused_hctx_lock); 3474 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3475 spin_unlock(&q->unused_hctx_lock); 3476 } 3477 3478 static void blk_mq_exit_hw_queues(struct request_queue *q, 3479 struct blk_mq_tag_set *set, int nr_queue) 3480 { 3481 struct blk_mq_hw_ctx *hctx; 3482 unsigned long i; 3483 3484 queue_for_each_hw_ctx(q, hctx, i) { 3485 if (i == nr_queue) 3486 break; 3487 blk_mq_exit_hctx(q, set, hctx, i); 3488 } 3489 } 3490 3491 static int blk_mq_init_hctx(struct request_queue *q, 3492 struct blk_mq_tag_set *set, 3493 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3494 { 3495 hctx->queue_num = hctx_idx; 3496 3497 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3498 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3499 &hctx->cpuhp_online); 3500 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3501 3502 hctx->tags = set->tags[hctx_idx]; 3503 3504 if (set->ops->init_hctx && 3505 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3506 goto unregister_cpu_notifier; 3507 3508 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3509 hctx->numa_node)) 3510 goto exit_hctx; 3511 3512 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3513 goto exit_flush_rq; 3514 3515 return 0; 3516 3517 exit_flush_rq: 3518 if (set->ops->exit_request) 3519 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3520 exit_hctx: 3521 if (set->ops->exit_hctx) 3522 set->ops->exit_hctx(hctx, hctx_idx); 3523 unregister_cpu_notifier: 3524 blk_mq_remove_cpuhp(hctx); 3525 return -1; 3526 } 3527 3528 static struct blk_mq_hw_ctx * 3529 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3530 int node) 3531 { 3532 struct blk_mq_hw_ctx *hctx; 3533 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3534 3535 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3536 if (!hctx) 3537 goto fail_alloc_hctx; 3538 3539 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3540 goto free_hctx; 3541 3542 atomic_set(&hctx->nr_active, 0); 3543 if (node == NUMA_NO_NODE) 3544 node = set->numa_node; 3545 hctx->numa_node = node; 3546 3547 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3548 spin_lock_init(&hctx->lock); 3549 INIT_LIST_HEAD(&hctx->dispatch); 3550 hctx->queue = q; 3551 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3552 3553 INIT_LIST_HEAD(&hctx->hctx_list); 3554 3555 /* 3556 * Allocate space for all possible cpus to avoid allocation at 3557 * runtime 3558 */ 3559 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3560 gfp, node); 3561 if (!hctx->ctxs) 3562 goto free_cpumask; 3563 3564 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3565 gfp, node, false, false)) 3566 goto free_ctxs; 3567 
hctx->nr_ctx = 0; 3568 3569 spin_lock_init(&hctx->dispatch_wait_lock); 3570 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3571 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3572 3573 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3574 if (!hctx->fq) 3575 goto free_bitmap; 3576 3577 blk_mq_hctx_kobj_init(hctx); 3578 3579 return hctx; 3580 3581 free_bitmap: 3582 sbitmap_free(&hctx->ctx_map); 3583 free_ctxs: 3584 kfree(hctx->ctxs); 3585 free_cpumask: 3586 free_cpumask_var(hctx->cpumask); 3587 free_hctx: 3588 kfree(hctx); 3589 fail_alloc_hctx: 3590 return NULL; 3591 } 3592 3593 static void blk_mq_init_cpu_queues(struct request_queue *q, 3594 unsigned int nr_hw_queues) 3595 { 3596 struct blk_mq_tag_set *set = q->tag_set; 3597 unsigned int i, j; 3598 3599 for_each_possible_cpu(i) { 3600 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3601 struct blk_mq_hw_ctx *hctx; 3602 int k; 3603 3604 __ctx->cpu = i; 3605 spin_lock_init(&__ctx->lock); 3606 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3607 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3608 3609 __ctx->queue = q; 3610 3611 /* 3612 * Set local node, IFF we have more than one hw queue. If 3613 * not, we remain on the home node of the device 3614 */ 3615 for (j = 0; j < set->nr_maps; j++) { 3616 hctx = blk_mq_map_queue_type(q, j, i); 3617 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3618 hctx->numa_node = cpu_to_node(i); 3619 } 3620 } 3621 } 3622 3623 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3624 unsigned int hctx_idx, 3625 unsigned int depth) 3626 { 3627 struct blk_mq_tags *tags; 3628 int ret; 3629 3630 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3631 if (!tags) 3632 return NULL; 3633 3634 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3635 if (ret) { 3636 blk_mq_free_rq_map(tags); 3637 return NULL; 3638 } 3639 3640 return tags; 3641 } 3642 3643 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3644 int hctx_idx) 3645 { 3646 if (blk_mq_is_shared_tags(set->flags)) { 3647 set->tags[hctx_idx] = set->shared_tags; 3648 3649 return true; 3650 } 3651 3652 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3653 set->queue_depth); 3654 3655 return set->tags[hctx_idx]; 3656 } 3657 3658 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3659 struct blk_mq_tags *tags, 3660 unsigned int hctx_idx) 3661 { 3662 if (tags) { 3663 blk_mq_free_rqs(set, tags, hctx_idx); 3664 blk_mq_free_rq_map(tags); 3665 } 3666 } 3667 3668 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3669 unsigned int hctx_idx) 3670 { 3671 if (!blk_mq_is_shared_tags(set->flags)) 3672 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3673 3674 set->tags[hctx_idx] = NULL; 3675 } 3676 3677 static void blk_mq_map_swqueue(struct request_queue *q) 3678 { 3679 unsigned int j, hctx_idx; 3680 unsigned long i; 3681 struct blk_mq_hw_ctx *hctx; 3682 struct blk_mq_ctx *ctx; 3683 struct blk_mq_tag_set *set = q->tag_set; 3684 3685 queue_for_each_hw_ctx(q, hctx, i) { 3686 cpumask_clear(hctx->cpumask); 3687 hctx->nr_ctx = 0; 3688 hctx->dispatch_from = NULL; 3689 } 3690 3691 /* 3692 * Map software to hardware queues. 3693 * 3694 * If the cpu isn't present, the cpu is mapped to first hctx. 
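 *
 * For example, on a 4-CPU system with two hw queues, the default
 * spread from blk_mq_map_queues() would typically map CPUs 0-1 to
 * hctx 0 and CPUs 2-3 to hctx 1, so each ctx lands in exactly one
 * hctx per queue map.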
3695 */ 3696 for_each_possible_cpu(i) { 3697 3698 ctx = per_cpu_ptr(q->queue_ctx, i); 3699 for (j = 0; j < set->nr_maps; j++) { 3700 if (!set->map[j].nr_queues) { 3701 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3702 HCTX_TYPE_DEFAULT, i); 3703 continue; 3704 } 3705 hctx_idx = set->map[j].mq_map[i]; 3706 /* An unmapped hw queue can be remapped after the CPU topology changed. */ 3707 if (!set->tags[hctx_idx] && 3708 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 3709 /* 3710 * If tags initialization fails for some hctx, 3711 * that hctx won't be brought online. In this 3712 * case, remap the current ctx to hctx[0], which 3713 * is guaranteed to always have tags allocated. 3714 */ 3715 set->map[j].mq_map[i] = 0; 3716 } 3717 3718 hctx = blk_mq_map_queue_type(q, j, i); 3719 ctx->hctxs[j] = hctx; 3720 /* 3721 * If the CPU is already set in the mask, then we've 3722 * mapped this one already. This can happen if 3723 * devices share queues across queue maps. 3724 */ 3725 if (cpumask_test_cpu(i, hctx->cpumask)) 3726 continue; 3727 3728 cpumask_set_cpu(i, hctx->cpumask); 3729 hctx->type = j; 3730 ctx->index_hw[hctx->type] = hctx->nr_ctx; 3731 hctx->ctxs[hctx->nr_ctx++] = ctx; 3732 3733 /* 3734 * If the nr_ctx type overflows, we have exceeded the 3735 * number of sw queues we can support. 3736 */ 3737 BUG_ON(!hctx->nr_ctx); 3738 } 3739 3740 for (; j < HCTX_MAX_TYPES; j++) 3741 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3742 HCTX_TYPE_DEFAULT, i); 3743 } 3744 3745 queue_for_each_hw_ctx(q, hctx, i) { 3746 /* 3747 * If no software queues are mapped to this hardware queue, 3748 * disable it and free the request entries. 3749 */ 3750 if (!hctx->nr_ctx) { 3751 /* Never unmap queue 0. We need it as a 3752 * fallback in case a new remap fails to 3753 * allocate. 3754 */ 3755 if (i) 3756 __blk_mq_free_map_and_rqs(set, i); 3757 3758 hctx->tags = NULL; 3759 continue; 3760 } 3761 3762 hctx->tags = set->tags[i]; 3763 WARN_ON(!hctx->tags); 3764 3765 /* 3766 * Set the map size to the number of mapped software queues. 3767 * This is more accurate and more efficient than looping 3768 * over all possibly mapped software queues. 3769 */ 3770 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 3771 3772 /* 3773 * Initialize batch round-robin counts. 3774 */ 3775 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 3776 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 3777 } 3778 } 3779 3780 /* 3781 * Caller needs to ensure that we're either frozen/quiesced, or that 3782 * the queue isn't live yet.
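 *
 * That is needed because hctx->flags is updated below without any
 * locking: readers in the hot path must not race with the update,
 * which is guaranteed while the queue is frozen or not yet live.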
3783 */ 3784 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3785 { 3786 struct blk_mq_hw_ctx *hctx; 3787 unsigned long i; 3788 3789 queue_for_each_hw_ctx(q, hctx, i) { 3790 if (shared) { 3791 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3792 } else { 3793 blk_mq_tag_idle(hctx); 3794 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3795 } 3796 } 3797 } 3798 3799 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3800 bool shared) 3801 { 3802 struct request_queue *q; 3803 3804 lockdep_assert_held(&set->tag_list_lock); 3805 3806 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3807 blk_mq_freeze_queue(q); 3808 queue_set_hctx_shared(q, shared); 3809 blk_mq_unfreeze_queue(q); 3810 } 3811 } 3812 3813 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3814 { 3815 struct blk_mq_tag_set *set = q->tag_set; 3816 3817 mutex_lock(&set->tag_list_lock); 3818 list_del(&q->tag_set_list); 3819 if (list_is_singular(&set->tag_list)) { 3820 /* just transitioned to unshared */ 3821 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3822 /* update existing queue */ 3823 blk_mq_update_tag_set_shared(set, false); 3824 } 3825 mutex_unlock(&set->tag_list_lock); 3826 INIT_LIST_HEAD(&q->tag_set_list); 3827 } 3828 3829 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3830 struct request_queue *q) 3831 { 3832 mutex_lock(&set->tag_list_lock); 3833 3834 /* 3835 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3836 */ 3837 if (!list_empty(&set->tag_list) && 3838 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3839 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3840 /* update existing queue */ 3841 blk_mq_update_tag_set_shared(set, true); 3842 } 3843 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3844 queue_set_hctx_shared(q, true); 3845 list_add_tail(&q->tag_set_list, &set->tag_list); 3846 3847 mutex_unlock(&set->tag_list_lock); 3848 } 3849 3850 /* All allocations will be freed in release handler of q->mq_kobj */ 3851 static int blk_mq_alloc_ctxs(struct request_queue *q) 3852 { 3853 struct blk_mq_ctxs *ctxs; 3854 int cpu; 3855 3856 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 3857 if (!ctxs) 3858 return -ENOMEM; 3859 3860 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 3861 if (!ctxs->queue_ctx) 3862 goto fail; 3863 3864 for_each_possible_cpu(cpu) { 3865 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 3866 ctx->ctxs = ctxs; 3867 } 3868 3869 q->mq_kobj = &ctxs->kobj; 3870 q->queue_ctx = ctxs->queue_ctx; 3871 3872 return 0; 3873 fail: 3874 kfree(ctxs); 3875 return -ENOMEM; 3876 } 3877 3878 /* 3879 * It is the actual release handler for mq, but we do it from 3880 * request queue's release handler for avoiding use-after-free 3881 * and headache because q->mq_kobj shouldn't have been introduced, 3882 * but we can't group ctx/kctx kobj without it. 3883 */ 3884 void blk_mq_release(struct request_queue *q) 3885 { 3886 struct blk_mq_hw_ctx *hctx, *next; 3887 unsigned long i; 3888 3889 queue_for_each_hw_ctx(q, hctx, i) 3890 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 3891 3892 /* all hctx are in .unused_hctx_list now */ 3893 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 3894 list_del_init(&hctx->hctx_list); 3895 kobject_put(&hctx->kobj); 3896 } 3897 3898 xa_destroy(&q->hctx_table); 3899 3900 /* 3901 * release .mq_kobj and sw queue's kobject now because 3902 * both share lifetime with request queue. 
3903 */ 3904 blk_mq_sysfs_deinit(q); 3905 } 3906 3907 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 3908 void *queuedata) 3909 { 3910 struct request_queue *q; 3911 int ret; 3912 3913 q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING); 3914 if (!q) 3915 return ERR_PTR(-ENOMEM); 3916 q->queuedata = queuedata; 3917 ret = blk_mq_init_allocated_queue(set, q); 3918 if (ret) { 3919 blk_cleanup_queue(q); 3920 return ERR_PTR(ret); 3921 } 3922 return q; 3923 } 3924 3925 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3926 { 3927 return blk_mq_init_queue_data(set, NULL); 3928 } 3929 EXPORT_SYMBOL(blk_mq_init_queue); 3930 3931 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, 3932 struct lock_class_key *lkclass) 3933 { 3934 struct request_queue *q; 3935 struct gendisk *disk; 3936 3937 q = blk_mq_init_queue_data(set, queuedata); 3938 if (IS_ERR(q)) 3939 return ERR_CAST(q); 3940 3941 disk = __alloc_disk_node(q, set->numa_node, lkclass); 3942 if (!disk) { 3943 blk_cleanup_queue(q); 3944 return ERR_PTR(-ENOMEM); 3945 } 3946 return disk; 3947 } 3948 EXPORT_SYMBOL(__blk_mq_alloc_disk); 3949 3950 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3951 struct blk_mq_tag_set *set, struct request_queue *q, 3952 int hctx_idx, int node) 3953 { 3954 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3955 3956 /* reuse dead hctx first */ 3957 spin_lock(&q->unused_hctx_lock); 3958 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3959 if (tmp->numa_node == node) { 3960 hctx = tmp; 3961 break; 3962 } 3963 } 3964 if (hctx) 3965 list_del_init(&hctx->hctx_list); 3966 spin_unlock(&q->unused_hctx_lock); 3967 3968 if (!hctx) 3969 hctx = blk_mq_alloc_hctx(q, set, node); 3970 if (!hctx) 3971 goto fail; 3972 3973 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 3974 goto free_hctx; 3975 3976 return hctx; 3977 3978 free_hctx: 3979 kobject_put(&hctx->kobj); 3980 fail: 3981 return NULL; 3982 } 3983 3984 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 3985 struct request_queue *q) 3986 { 3987 struct blk_mq_hw_ctx *hctx; 3988 unsigned long i, j; 3989 3990 /* protect against switching io scheduler */ 3991 mutex_lock(&q->sysfs_lock); 3992 for (i = 0; i < set->nr_hw_queues; i++) { 3993 int old_node; 3994 int node = blk_mq_get_hctx_node(set, i); 3995 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); 3996 3997 if (old_hctx) { 3998 old_node = old_hctx->numa_node; 3999 blk_mq_exit_hctx(q, set, old_hctx, i); 4000 } 4001 4002 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { 4003 if (!old_hctx) 4004 break; 4005 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", 4006 node, old_node); 4007 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); 4008 WARN_ON_ONCE(!hctx); 4009 } 4010 } 4011 /* 4012 * Increasing nr_hw_queues fails. Free the newly allocated 4013 * hctxs and keep the previous q->nr_hw_queues. 
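 *
 * For example, when growing from 2 to 4 hw queues and the allocation
 * of hctx 3 fails after hctx 2 was set up, the loop above exits with
 * i == 3: hctx 2 is torn down again below and q->nr_hw_queues stays
 * at 2.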
4014 */ 4015 if (i != set->nr_hw_queues) { 4016 j = q->nr_hw_queues; 4017 } else { 4018 j = i; 4019 q->nr_hw_queues = set->nr_hw_queues; 4020 } 4021 4022 xa_for_each_start(&q->hctx_table, j, hctx, j) 4023 blk_mq_exit_hctx(q, set, hctx, j); 4024 mutex_unlock(&q->sysfs_lock); 4025 } 4026 4027 static void blk_mq_update_poll_flag(struct request_queue *q) 4028 { 4029 struct blk_mq_tag_set *set = q->tag_set; 4030 4031 if (set->nr_maps > HCTX_TYPE_POLL && 4032 set->map[HCTX_TYPE_POLL].nr_queues) 4033 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 4034 else 4035 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); 4036 } 4037 4038 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 4039 struct request_queue *q) 4040 { 4041 WARN_ON_ONCE(blk_queue_has_srcu(q) != 4042 !!(set->flags & BLK_MQ_F_BLOCKING)); 4043 4044 /* mark the queue as mq asap */ 4045 q->mq_ops = set->ops; 4046 4047 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 4048 blk_mq_poll_stats_bkt, 4049 BLK_MQ_POLL_STATS_BKTS, q); 4050 if (!q->poll_cb) 4051 goto err_exit; 4052 4053 if (blk_mq_alloc_ctxs(q)) 4054 goto err_poll; 4055 4056 /* init q->mq_kobj and sw queues' kobjects */ 4057 blk_mq_sysfs_init(q); 4058 4059 INIT_LIST_HEAD(&q->unused_hctx_list); 4060 spin_lock_init(&q->unused_hctx_lock); 4061 4062 xa_init(&q->hctx_table); 4063 4064 blk_mq_realloc_hw_ctxs(set, q); 4065 if (!q->nr_hw_queues) 4066 goto err_hctxs; 4067 4068 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 4069 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 4070 4071 q->tag_set = set; 4072 4073 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 4074 blk_mq_update_poll_flag(q); 4075 4076 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 4077 INIT_LIST_HEAD(&q->requeue_list); 4078 spin_lock_init(&q->requeue_lock); 4079 4080 q->nr_requests = set->queue_depth; 4081 4082 /* 4083 * Default to classic polling 4084 */ 4085 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 4086 4087 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 4088 blk_mq_add_queue_tag_set(set, q); 4089 blk_mq_map_swqueue(q); 4090 return 0; 4091 4092 err_hctxs: 4093 xa_destroy(&q->hctx_table); 4094 q->nr_hw_queues = 0; 4095 blk_mq_sysfs_deinit(q); 4096 err_poll: 4097 blk_stat_free_callback(q->poll_cb); 4098 q->poll_cb = NULL; 4099 err_exit: 4100 q->mq_ops = NULL; 4101 return -ENOMEM; 4102 } 4103 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 4104 4105 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 4106 void blk_mq_exit_queue(struct request_queue *q) 4107 { 4108 struct blk_mq_tag_set *set = q->tag_set; 4109 4110 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ 4111 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 4112 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. 
/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
	blk_mq_del_queue_tag_set(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	if (blk_mq_is_shared_tags(set->flags)) {
		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						set->queue_depth);
		if (!set->shared_tags)
			return -ENOMEM;
	}

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (!__blk_mq_alloc_map_and_rqs(set, i))
			goto out_unwind;
		cond_resched();
	}

	return 0;

out_unwind:
	while (--i >= 0)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}
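/*
 * Worked example of the halving behaviour above (numbers are
 * illustrative only): a set asking for queue_depth 1024 that hits
 * -ENOMEM is retried at 512, 256, ..., and the first depth that
 * allocates wins; the loop gives up once the depth would drop below
 * set->reserved_tags + BLK_MQ_TAG_MIN.
 */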
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	/*
	 * blk_mq_map_queues() and multiple .map_queues() implementations
	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
	 * number of hardware queues.
	 */
	if (set->nr_maps == 1)
		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

	if (set->ops->map_queues && !is_kdump_kernel()) {
		int i;

		/*
		 * transport .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 *	mask = get_cpu_mask(queue)
		 *	for_each_cpu(cpu, mask)
		 *		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared first
		 * to kill stale mappings, since a CPU might otherwise be
		 * left mapped to a hw queue it no longer belongs to.
		 */
		for (i = 0; i < set->nr_maps; i++)
			blk_mq_clear_mq_map(&set->map[i]);

		return set->ops->map_queues(set);
	} else {
		BUG_ON(set->nr_maps > 1);
		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	}
}
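/*
 * A minimal .map_queues implementation for a driver with a single
 * regular (non-poll) map might look like the sketch below;
 * "my_map_queues" is a made-up name, and a real transport would
 * typically derive the mapping from its interrupt affinity rather than
 * using the default spread:
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 *
 * PCI drivers would usually call blk_mq_pci_map_queues() here instead,
 * so that the mq_map follows the device's MSI-X vector affinity.
 */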
static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
				       int cur_nr_hw_queues,
				       int new_nr_hw_queues)
{
	struct blk_mq_tags **new_tags;

	if (cur_nr_hw_queues >= new_nr_hw_queues)
		return 0;

	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
				GFP_KERNEL, set->numa_node);
	if (!new_tags)
		return -ENOMEM;

	if (set->tags)
		memcpy(new_tags, set->tags, cur_nr_hw_queues *
		       sizeof(*set->tags));
	kfree(set->tags);
	set->tags = new_tags;
	set->nr_hw_queues = new_nr_hw_queues;

	return 0;
}

static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
				     int new_nr_hw_queues)
{
	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int i, ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	if (!set->nr_maps)
		set->nr_maps = 1;
	else if (set->nr_maps > HCTX_MAX_TYPES)
		return -EINVAL;

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->nr_maps = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map.
	 */
	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
		return -ENOMEM;

	ret = -ENOMEM;
	for (i = 0; i < set->nr_maps; i++) {
		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
						  sizeof(set->map[i].mq_map[0]),
						  GFP_KERNEL, set->numa_node);
		if (!set->map[i].mq_map)
			goto out_free_mq_map;
		set->map[i].nr_queues = is_kdump_kernel() ?
						1 : set->nr_hw_queues;
	}

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_set_map_and_rqs(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	for (i = 0; i < set->nr_maps; i++) {
		kfree(set->map[i].mq_map);
		set->map[i].mq_map = NULL;
	}
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

/* allocate and initialize a tagset for a simple single-queue device */
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->nr_maps = 1;
	set->queue_depth = queue_depth;
	set->numa_node = NUMA_NO_NODE;
	set->flags = set_flags;
	return blk_mq_alloc_tag_set(set);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
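/*
 * For simple drivers, the sq helper above replaces the open-coded tag
 * set setup shown earlier. A rough sketch (the driver names are
 * hypothetical):
 *
 *	err = blk_mq_alloc_sq_tag_set(&my_dev->tag_set, &my_mq_ops, 16,
 *				      BLK_MQ_F_SHOULD_MERGE);
 *	if (err)
 *		return err;
 *
 * The tag set must later be torn down with blk_mq_free_tag_set() once
 * every queue using it is gone.
 */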
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i, j;

	for (i = 0; i < set->nr_hw_queues; i++)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	for (j = 0; j < set->nr_maps; j++) {
		kfree(set->map[j].mq_map);
		set->map[j].mq_map = NULL;
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int ret;
	unsigned long i;

	if (!set)
		return -EINVAL;

	if (q->nr_requests == nr)
		return 0;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		}
		if (ret)
			break;
		if (q->elevator && q->elevator->type->ops.depth_updated)
			q->elevator->type->ops.depth_updated(hctx);
	}
	if (!ret) {
		q->nr_requests = nr;
		if (blk_mq_is_shared_tags(set->flags)) {
			if (q->elevator)
				blk_mq_tag_update_sched_shared_tags(q);
			else
				blk_mq_tag_resize_shared_tags(set, nr);
		}
	}

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in the qe pair list and switch the
 * io scheduler to 'none'.
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release, which also drops the module
	 * reference taken by elevator_get. Take an extra reference on the
	 * io scheduler module here so that it cannot be unloaded while we
	 * still hold a pointer to its elevator_type.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	list_for_each_entry(qe, head, node)
		if (qe->q == q)
			return qe;

	return NULL;
}

static void blk_mq_elv_switch_back(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t;

	qe = blk_lookup_qe_pair(head, q);
	if (!qe)
		return;
	t = qe->type;
	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
					 int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_update_poll_flag(q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			int i = prev_nr_hw_queues;

			pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
				nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				__blk_mq_free_map_and_rqs(set, i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
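/*
 * Drivers whose hardware queue count can change at runtime (e.g. a
 * controller reset that renegotiates the number of queues) use
 * blk_mq_update_nr_hw_queues(). A rough sketch, with hypothetical
 * driver names:
 *
 *	nr = my_dev_negotiate_queue_count(my_dev);
 *	blk_mq_update_nr_hw_queues(&my_dev->tag_set, nr);
 *
 * The helper freezes every queue in the set for the duration of the
 * update, so it must not be called from a context that is itself
 * holding requests on one of those queues.
 */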
/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (q->poll_stat)
		return true;

	return blk_stats_alloc_enable(q);
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users.
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size,
	 * if available, which leads to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}

static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
	struct request *rq = blk_qc_to_rq(hctx, qc);
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	/*
	 * If a request has completed on a queue that uses an I/O scheduler,
	 * we won't get back a request from blk_qc_to_rq.
	 */
	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);

	/*
	 * If we sleep, have the caller restart the poll loop to reset the
	 * state. Like for the other success return cases, the caller is
	 * responsible for checking if the IO completed. If the IO isn't
	 * complete, we'll get called again and will go straight to the busy
	 * poll loop.
	 */
	return true;
}
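/*
 * Worked example of the hybrid sleep above (illustrative numbers): with
 * q->poll_nsec == 0 and an observed mean completion time of 20000 ns
 * for this request's size and direction, blk_mq_poll_nsecs() returns
 * (20000 + 1) / 2 == 10000 ns, so the poller sleeps for roughly half
 * the expected service time before busy polling for the completion.
 */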
static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (task_is_running(current))
			return 1;

		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}

static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);