1 /* 2 * Block multiqueue core code 3 * 4 * Copyright (C) 2013-2014 Jens Axboe 5 * Copyright (C) 2013-2014 Christoph Hellwig 6 */ 7 #include <linux/kernel.h> 8 #include <linux/module.h> 9 #include <linux/backing-dev.h> 10 #include <linux/bio.h> 11 #include <linux/blkdev.h> 12 #include <linux/kmemleak.h> 13 #include <linux/mm.h> 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/workqueue.h> 17 #include <linux/smp.h> 18 #include <linux/llist.h> 19 #include <linux/list_sort.h> 20 #include <linux/cpu.h> 21 #include <linux/cache.h> 22 #include <linux/sched/sysctl.h> 23 #include <linux/delay.h> 24 #include <linux/crash_dump.h> 25 #include <linux/prefetch.h> 26 27 #include <trace/events/block.h> 28 29 #include <linux/blk-mq.h> 30 #include "blk.h" 31 #include "blk-mq.h" 32 #include "blk-mq-tag.h" 33 #include "blk-stat.h" 34 #include "blk-wbt.h" 35 #include "blk-mq-sched.h" 36 37 static DEFINE_MUTEX(all_q_mutex); 38 static LIST_HEAD(all_q_list); 39 40 /* 41 * Check if any of the ctx's have pending work in this hardware queue 42 */ 43 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 44 { 45 return sbitmap_any_bit_set(&hctx->ctx_map) || 46 !list_empty_careful(&hctx->dispatch) || 47 blk_mq_sched_has_work(hctx); 48 } 49 50 /* 51 * Mark this ctx as having pending work in this hardware queue 52 */ 53 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 54 struct blk_mq_ctx *ctx) 55 { 56 if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw)) 57 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw); 58 } 59 60 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, 61 struct blk_mq_ctx *ctx) 62 { 63 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw); 64 } 65 66 void blk_mq_freeze_queue_start(struct request_queue *q) 67 { 68 int freeze_depth; 69 70 freeze_depth = atomic_inc_return(&q->mq_freeze_depth); 71 if (freeze_depth == 1) { 72 percpu_ref_kill(&q->q_usage_counter); 73 blk_mq_run_hw_queues(q, false); 74 } 75 } 76 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); 77 78 static void blk_mq_freeze_queue_wait(struct request_queue *q) 79 { 80 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); 81 } 82 83 /* 84 * Guarantee no request is in use, so we can change any data structure of 85 * the queue afterward. 86 */ 87 void blk_freeze_queue(struct request_queue *q) 88 { 89 /* 90 * In the !blk_mq case we are only calling this to kill the 91 * q_usage_counter, otherwise this increases the freeze depth 92 * and waits for it to return to zero. For this reason there is 93 * no blk_unfreeze_queue(), and blk_freeze_queue() is not 94 * exported to drivers as the only user for unfreeze is blk_mq. 95 */ 96 blk_mq_freeze_queue_start(q); 97 blk_mq_freeze_queue_wait(q); 98 } 99 100 void blk_mq_freeze_queue(struct request_queue *q) 101 { 102 /* 103 * ...just an alias to keep freeze and unfreeze actions balanced 104 * in the blk_mq_* namespace 105 */ 106 blk_freeze_queue(q); 107 } 108 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); 109 110 void blk_mq_unfreeze_queue(struct request_queue *q) 111 { 112 int freeze_depth; 113 114 freeze_depth = atomic_dec_return(&q->mq_freeze_depth); 115 WARN_ON_ONCE(freeze_depth < 0); 116 if (!freeze_depth) { 117 percpu_ref_reinit(&q->q_usage_counter); 118 wake_up_all(&q->mq_freeze_wq); 119 } 120 } 121 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 122 123 /** 124 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished 125 * @q: request queue. 126 * 127 * Note: this function does not prevent that the struct request end_io() 128 * callback function is invoked. Additionally, it is not prevented that 129 * new queue_rq() calls occur unless the queue has been stopped first. 130 */ 131 void blk_mq_quiesce_queue(struct request_queue *q) 132 { 133 struct blk_mq_hw_ctx *hctx; 134 unsigned int i; 135 bool rcu = false; 136 137 blk_mq_stop_hw_queues(q); 138 139 queue_for_each_hw_ctx(q, hctx, i) { 140 if (hctx->flags & BLK_MQ_F_BLOCKING) 141 synchronize_srcu(&hctx->queue_rq_srcu); 142 else 143 rcu = true; 144 } 145 if (rcu) 146 synchronize_rcu(); 147 } 148 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); 149 150 void blk_mq_wake_waiters(struct request_queue *q) 151 { 152 struct blk_mq_hw_ctx *hctx; 153 unsigned int i; 154 155 queue_for_each_hw_ctx(q, hctx, i) 156 if (blk_mq_hw_queue_mapped(hctx)) 157 blk_mq_tag_wakeup_all(hctx->tags, true); 158 159 /* 160 * If we are called because the queue has now been marked as 161 * dying, we need to ensure that processes currently waiting on 162 * the queue are notified as well. 163 */ 164 wake_up_all(&q->mq_freeze_wq); 165 } 166 167 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) 168 { 169 return blk_mq_has_free_tags(hctx->tags); 170 } 171 EXPORT_SYMBOL(blk_mq_can_queue); 172 173 void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, 174 struct request *rq, unsigned int op) 175 { 176 INIT_LIST_HEAD(&rq->queuelist); 177 /* csd/requeue_work/fifo_time is initialized before use */ 178 rq->q = q; 179 rq->mq_ctx = ctx; 180 rq->cmd_flags = op; 181 if (blk_queue_io_stat(q)) 182 rq->rq_flags |= RQF_IO_STAT; 183 /* do not touch atomic flags, it needs atomic ops against the timer */ 184 rq->cpu = -1; 185 INIT_HLIST_NODE(&rq->hash); 186 RB_CLEAR_NODE(&rq->rb_node); 187 rq->rq_disk = NULL; 188 rq->part = NULL; 189 rq->start_time = jiffies; 190 #ifdef CONFIG_BLK_CGROUP 191 rq->rl = NULL; 192 set_start_time_ns(rq); 193 rq->io_start_time_ns = 0; 194 #endif 195 rq->nr_phys_segments = 0; 196 #if defined(CONFIG_BLK_DEV_INTEGRITY) 197 rq->nr_integrity_segments = 0; 198 #endif 199 rq->special = NULL; 200 /* tag was already set */ 201 rq->errors = 0; 202 203 rq->cmd = rq->__cmd; 204 205 rq->extra_len = 0; 206 rq->sense_len = 0; 207 rq->resid_len = 0; 208 rq->sense = NULL; 209 210 INIT_LIST_HEAD(&rq->timeout_list); 211 rq->timeout = 0; 212 213 rq->end_io = NULL; 214 rq->end_io_data = NULL; 215 rq->next_rq = NULL; 216 217 ctx->rq_dispatched[op_is_sync(op)]++; 218 } 219 EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init); 220 221 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, 222 unsigned int op) 223 { 224 struct request *rq; 225 unsigned int tag; 226 227 tag = blk_mq_get_tag(data); 228 if (tag != BLK_MQ_TAG_FAIL) { 229 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 230 231 rq = tags->static_rqs[tag]; 232 233 if (data->flags & BLK_MQ_REQ_INTERNAL) { 234 rq->tag = -1; 235 rq->internal_tag = tag; 236 } else { 237 if (blk_mq_tag_busy(data->hctx)) { 238 rq->rq_flags = RQF_MQ_INFLIGHT; 239 atomic_inc(&data->hctx->nr_active); 240 } 241 rq->tag = tag; 242 rq->internal_tag = -1; 243 } 244 245 blk_mq_rq_ctx_init(data->q, data->ctx, rq, op); 246 return rq; 247 } 248 249 return NULL; 250 } 251 EXPORT_SYMBOL_GPL(__blk_mq_alloc_request); 252 253 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, 254 unsigned int flags) 255 { 256 struct blk_mq_alloc_data alloc_data = { .flags = flags }; 257 struct request *rq; 258 int ret; 259 260 ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT); 261 if (ret) 262 return ERR_PTR(ret); 263 264 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); 265 266 blk_mq_put_ctx(alloc_data.ctx); 267 blk_queue_exit(q); 268 269 if (!rq) 270 return ERR_PTR(-EWOULDBLOCK); 271 272 rq->__data_len = 0; 273 rq->__sector = (sector_t) -1; 274 rq->bio = rq->biotail = NULL; 275 return rq; 276 } 277 EXPORT_SYMBOL(blk_mq_alloc_request); 278 279 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, 280 unsigned int flags, unsigned int hctx_idx) 281 { 282 struct blk_mq_hw_ctx *hctx; 283 struct blk_mq_ctx *ctx; 284 struct request *rq; 285 struct blk_mq_alloc_data alloc_data; 286 int ret; 287 288 /* 289 * If the tag allocator sleeps we could get an allocation for a 290 * different hardware context. No need to complicate the low level 291 * allocator for this for the rare use case of a command tied to 292 * a specific queue. 293 */ 294 if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT))) 295 return ERR_PTR(-EINVAL); 296 297 if (hctx_idx >= q->nr_hw_queues) 298 return ERR_PTR(-EIO); 299 300 ret = blk_queue_enter(q, true); 301 if (ret) 302 return ERR_PTR(ret); 303 304 /* 305 * Check if the hardware context is actually mapped to anything. 306 * If not tell the caller that it should skip this queue. 307 */ 308 hctx = q->queue_hw_ctx[hctx_idx]; 309 if (!blk_mq_hw_queue_mapped(hctx)) { 310 ret = -EXDEV; 311 goto out_queue_exit; 312 } 313 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask)); 314 315 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); 316 rq = __blk_mq_alloc_request(&alloc_data, rw); 317 if (!rq) { 318 ret = -EWOULDBLOCK; 319 goto out_queue_exit; 320 } 321 322 return rq; 323 324 out_queue_exit: 325 blk_queue_exit(q); 326 return ERR_PTR(ret); 327 } 328 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 329 330 void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 331 struct request *rq) 332 { 333 const int sched_tag = rq->internal_tag; 334 struct request_queue *q = rq->q; 335 336 if (rq->rq_flags & RQF_MQ_INFLIGHT) 337 atomic_dec(&hctx->nr_active); 338 339 wbt_done(q->rq_wb, &rq->issue_stat); 340 rq->rq_flags = 0; 341 342 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 343 clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags); 344 if (rq->tag != -1) 345 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); 346 if (sched_tag != -1) 347 blk_mq_sched_completed_request(hctx, rq); 348 blk_mq_sched_restart_queues(hctx); 349 blk_queue_exit(q); 350 } 351 352 static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx, 353 struct request *rq) 354 { 355 struct blk_mq_ctx *ctx = rq->mq_ctx; 356 357 ctx->rq_completed[rq_is_sync(rq)]++; 358 __blk_mq_finish_request(hctx, ctx, rq); 359 } 360 361 void blk_mq_finish_request(struct request *rq) 362 { 363 blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq); 364 } 365 366 void blk_mq_free_request(struct request *rq) 367 { 368 blk_mq_sched_put_request(rq); 369 } 370 EXPORT_SYMBOL_GPL(blk_mq_free_request); 371 372 inline void __blk_mq_end_request(struct request *rq, int error) 373 { 374 blk_account_io_done(rq); 375 376 if (rq->end_io) { 377 wbt_done(rq->q->rq_wb, &rq->issue_stat); 378 rq->end_io(rq, error); 379 } else { 380 if (unlikely(blk_bidi_rq(rq))) 381 blk_mq_free_request(rq->next_rq); 382 blk_mq_free_request(rq); 383 } 384 } 385 EXPORT_SYMBOL(__blk_mq_end_request); 386 387 void blk_mq_end_request(struct request *rq, int error) 388 { 389 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 390 BUG(); 391 __blk_mq_end_request(rq, error); 392 } 393 EXPORT_SYMBOL(blk_mq_end_request); 394 395 static void __blk_mq_complete_request_remote(void *data) 396 { 397 struct request *rq = data; 398 399 rq->q->softirq_done_fn(rq); 400 } 401 402 static void blk_mq_ipi_complete_request(struct request *rq) 403 { 404 struct blk_mq_ctx *ctx = rq->mq_ctx; 405 bool shared = false; 406 int cpu; 407 408 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { 409 rq->q->softirq_done_fn(rq); 410 return; 411 } 412 413 cpu = get_cpu(); 414 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) 415 shared = cpus_share_cache(cpu, ctx->cpu); 416 417 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { 418 rq->csd.func = __blk_mq_complete_request_remote; 419 rq->csd.info = rq; 420 rq->csd.flags = 0; 421 smp_call_function_single_async(ctx->cpu, &rq->csd); 422 } else { 423 rq->q->softirq_done_fn(rq); 424 } 425 put_cpu(); 426 } 427 428 static void blk_mq_stat_add(struct request *rq) 429 { 430 if (rq->rq_flags & RQF_STATS) { 431 /* 432 * We could rq->mq_ctx here, but there's less of a risk 433 * of races if we have the completion event add the stats 434 * to the local software queue. 435 */ 436 struct blk_mq_ctx *ctx; 437 438 ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id()); 439 blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq); 440 } 441 } 442 443 static void __blk_mq_complete_request(struct request *rq) 444 { 445 struct request_queue *q = rq->q; 446 447 blk_mq_stat_add(rq); 448 449 if (!q->softirq_done_fn) 450 blk_mq_end_request(rq, rq->errors); 451 else 452 blk_mq_ipi_complete_request(rq); 453 } 454 455 /** 456 * blk_mq_complete_request - end I/O on a request 457 * @rq: the request being processed 458 * 459 * Description: 460 * Ends all I/O on a request. It does not handle partial completions. 461 * The actual completion happens out-of-order, through a IPI handler. 462 **/ 463 void blk_mq_complete_request(struct request *rq, int error) 464 { 465 struct request_queue *q = rq->q; 466 467 if (unlikely(blk_should_fake_timeout(q))) 468 return; 469 if (!blk_mark_rq_complete(rq)) { 470 rq->errors = error; 471 __blk_mq_complete_request(rq); 472 } 473 } 474 EXPORT_SYMBOL(blk_mq_complete_request); 475 476 int blk_mq_request_started(struct request *rq) 477 { 478 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 479 } 480 EXPORT_SYMBOL_GPL(blk_mq_request_started); 481 482 void blk_mq_start_request(struct request *rq) 483 { 484 struct request_queue *q = rq->q; 485 486 blk_mq_sched_started_request(rq); 487 488 trace_block_rq_issue(q, rq); 489 490 rq->resid_len = blk_rq_bytes(rq); 491 if (unlikely(blk_bidi_rq(rq))) 492 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); 493 494 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 495 blk_stat_set_issue_time(&rq->issue_stat); 496 rq->rq_flags |= RQF_STATS; 497 wbt_issue(q->rq_wb, &rq->issue_stat); 498 } 499 500 blk_add_timer(rq); 501 502 /* 503 * Ensure that ->deadline is visible before set the started 504 * flag and clear the completed flag. 505 */ 506 smp_mb__before_atomic(); 507 508 /* 509 * Mark us as started and clear complete. Complete might have been 510 * set if requeue raced with timeout, which then marked it as 511 * complete. So be sure to clear complete again when we start 512 * the request, otherwise we'll ignore the completion event. 513 */ 514 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) 515 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 516 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) 517 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); 518 519 if (q->dma_drain_size && blk_rq_bytes(rq)) { 520 /* 521 * Make sure space for the drain appears. We know we can do 522 * this because max_hw_segments has been adjusted to be one 523 * fewer than the device can handle. 524 */ 525 rq->nr_phys_segments++; 526 } 527 } 528 EXPORT_SYMBOL(blk_mq_start_request); 529 530 static void __blk_mq_requeue_request(struct request *rq) 531 { 532 struct request_queue *q = rq->q; 533 534 trace_block_rq_requeue(q, rq); 535 wbt_requeue(q->rq_wb, &rq->issue_stat); 536 blk_mq_sched_requeue_request(rq); 537 538 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { 539 if (q->dma_drain_size && blk_rq_bytes(rq)) 540 rq->nr_phys_segments--; 541 } 542 } 543 544 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 545 { 546 __blk_mq_requeue_request(rq); 547 548 BUG_ON(blk_queued_rq(rq)); 549 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); 550 } 551 EXPORT_SYMBOL(blk_mq_requeue_request); 552 553 static void blk_mq_requeue_work(struct work_struct *work) 554 { 555 struct request_queue *q = 556 container_of(work, struct request_queue, requeue_work.work); 557 LIST_HEAD(rq_list); 558 struct request *rq, *next; 559 unsigned long flags; 560 561 spin_lock_irqsave(&q->requeue_lock, flags); 562 list_splice_init(&q->requeue_list, &rq_list); 563 spin_unlock_irqrestore(&q->requeue_lock, flags); 564 565 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 566 if (!(rq->rq_flags & RQF_SOFTBARRIER)) 567 continue; 568 569 rq->rq_flags &= ~RQF_SOFTBARRIER; 570 list_del_init(&rq->queuelist); 571 blk_mq_sched_insert_request(rq, true, false, false, true); 572 } 573 574 while (!list_empty(&rq_list)) { 575 rq = list_entry(rq_list.next, struct request, queuelist); 576 list_del_init(&rq->queuelist); 577 blk_mq_sched_insert_request(rq, false, false, false, true); 578 } 579 580 blk_mq_run_hw_queues(q, false); 581 } 582 583 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, 584 bool kick_requeue_list) 585 { 586 struct request_queue *q = rq->q; 587 unsigned long flags; 588 589 /* 590 * We abuse this flag that is otherwise used by the I/O scheduler to 591 * request head insertation from the workqueue. 592 */ 593 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 594 595 spin_lock_irqsave(&q->requeue_lock, flags); 596 if (at_head) { 597 rq->rq_flags |= RQF_SOFTBARRIER; 598 list_add(&rq->queuelist, &q->requeue_list); 599 } else { 600 list_add_tail(&rq->queuelist, &q->requeue_list); 601 } 602 spin_unlock_irqrestore(&q->requeue_lock, flags); 603 604 if (kick_requeue_list) 605 blk_mq_kick_requeue_list(q); 606 } 607 EXPORT_SYMBOL(blk_mq_add_to_requeue_list); 608 609 void blk_mq_kick_requeue_list(struct request_queue *q) 610 { 611 kblockd_schedule_delayed_work(&q->requeue_work, 0); 612 } 613 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 614 615 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 616 unsigned long msecs) 617 { 618 kblockd_schedule_delayed_work(&q->requeue_work, 619 msecs_to_jiffies(msecs)); 620 } 621 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 622 623 void blk_mq_abort_requeue_list(struct request_queue *q) 624 { 625 unsigned long flags; 626 LIST_HEAD(rq_list); 627 628 spin_lock_irqsave(&q->requeue_lock, flags); 629 list_splice_init(&q->requeue_list, &rq_list); 630 spin_unlock_irqrestore(&q->requeue_lock, flags); 631 632 while (!list_empty(&rq_list)) { 633 struct request *rq; 634 635 rq = list_first_entry(&rq_list, struct request, queuelist); 636 list_del_init(&rq->queuelist); 637 rq->errors = -EIO; 638 blk_mq_end_request(rq, rq->errors); 639 } 640 } 641 EXPORT_SYMBOL(blk_mq_abort_requeue_list); 642 643 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) 644 { 645 if (tag < tags->nr_tags) { 646 prefetch(tags->rqs[tag]); 647 return tags->rqs[tag]; 648 } 649 650 return NULL; 651 } 652 EXPORT_SYMBOL(blk_mq_tag_to_rq); 653 654 struct blk_mq_timeout_data { 655 unsigned long next; 656 unsigned int next_set; 657 }; 658 659 void blk_mq_rq_timed_out(struct request *req, bool reserved) 660 { 661 const struct blk_mq_ops *ops = req->q->mq_ops; 662 enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER; 663 664 /* 665 * We know that complete is set at this point. If STARTED isn't set 666 * anymore, then the request isn't active and the "timeout" should 667 * just be ignored. This can happen due to the bitflag ordering. 668 * Timeout first checks if STARTED is set, and if it is, assumes 669 * the request is active. But if we race with completion, then 670 * we both flags will get cleared. So check here again, and ignore 671 * a timeout event with a request that isn't active. 672 */ 673 if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags)) 674 return; 675 676 if (ops->timeout) 677 ret = ops->timeout(req, reserved); 678 679 switch (ret) { 680 case BLK_EH_HANDLED: 681 __blk_mq_complete_request(req); 682 break; 683 case BLK_EH_RESET_TIMER: 684 blk_add_timer(req); 685 blk_clear_rq_complete(req); 686 break; 687 case BLK_EH_NOT_HANDLED: 688 break; 689 default: 690 printk(KERN_ERR "block: bad eh return: %d\n", ret); 691 break; 692 } 693 } 694 695 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, 696 struct request *rq, void *priv, bool reserved) 697 { 698 struct blk_mq_timeout_data *data = priv; 699 700 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { 701 /* 702 * If a request wasn't started before the queue was 703 * marked dying, kill it here or it'll go unnoticed. 704 */ 705 if (unlikely(blk_queue_dying(rq->q))) { 706 rq->errors = -EIO; 707 blk_mq_end_request(rq, rq->errors); 708 } 709 return; 710 } 711 712 if (time_after_eq(jiffies, rq->deadline)) { 713 if (!blk_mark_rq_complete(rq)) 714 blk_mq_rq_timed_out(rq, reserved); 715 } else if (!data->next_set || time_after(data->next, rq->deadline)) { 716 data->next = rq->deadline; 717 data->next_set = 1; 718 } 719 } 720 721 static void blk_mq_timeout_work(struct work_struct *work) 722 { 723 struct request_queue *q = 724 container_of(work, struct request_queue, timeout_work); 725 struct blk_mq_timeout_data data = { 726 .next = 0, 727 .next_set = 0, 728 }; 729 int i; 730 731 /* A deadlock might occur if a request is stuck requiring a 732 * timeout at the same time a queue freeze is waiting 733 * completion, since the timeout code would not be able to 734 * acquire the queue reference here. 735 * 736 * That's why we don't use blk_queue_enter here; instead, we use 737 * percpu_ref_tryget directly, because we need to be able to 738 * obtain a reference even in the short window between the queue 739 * starting to freeze, by dropping the first reference in 740 * blk_mq_freeze_queue_start, and the moment the last request is 741 * consumed, marked by the instant q_usage_counter reaches 742 * zero. 743 */ 744 if (!percpu_ref_tryget(&q->q_usage_counter)) 745 return; 746 747 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); 748 749 if (data.next_set) { 750 data.next = blk_rq_timeout(round_jiffies_up(data.next)); 751 mod_timer(&q->timeout, data.next); 752 } else { 753 struct blk_mq_hw_ctx *hctx; 754 755 queue_for_each_hw_ctx(q, hctx, i) { 756 /* the hctx may be unmapped, so check it here */ 757 if (blk_mq_hw_queue_mapped(hctx)) 758 blk_mq_tag_idle(hctx); 759 } 760 } 761 blk_queue_exit(q); 762 } 763 764 /* 765 * Reverse check our software queue for entries that we could potentially 766 * merge with. Currently includes a hand-wavy stop count of 8, to not spend 767 * too much time checking for merges. 768 */ 769 static bool blk_mq_attempt_merge(struct request_queue *q, 770 struct blk_mq_ctx *ctx, struct bio *bio) 771 { 772 struct request *rq; 773 int checked = 8; 774 775 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) { 776 int el_ret; 777 778 if (!checked--) 779 break; 780 781 if (!blk_rq_merge_ok(rq, bio)) 782 continue; 783 784 el_ret = blk_try_merge(rq, bio); 785 if (el_ret == ELEVATOR_NO_MERGE) 786 continue; 787 788 if (!blk_mq_sched_allow_merge(q, rq, bio)) 789 break; 790 791 if (el_ret == ELEVATOR_BACK_MERGE) { 792 if (bio_attempt_back_merge(q, rq, bio)) { 793 ctx->rq_merged++; 794 return true; 795 } 796 break; 797 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 798 if (bio_attempt_front_merge(q, rq, bio)) { 799 ctx->rq_merged++; 800 return true; 801 } 802 break; 803 } 804 } 805 806 return false; 807 } 808 809 struct flush_busy_ctx_data { 810 struct blk_mq_hw_ctx *hctx; 811 struct list_head *list; 812 }; 813 814 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) 815 { 816 struct flush_busy_ctx_data *flush_data = data; 817 struct blk_mq_hw_ctx *hctx = flush_data->hctx; 818 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 819 820 sbitmap_clear_bit(sb, bitnr); 821 spin_lock(&ctx->lock); 822 list_splice_tail_init(&ctx->rq_list, flush_data->list); 823 spin_unlock(&ctx->lock); 824 return true; 825 } 826 827 /* 828 * Process software queues that have been marked busy, splicing them 829 * to the for-dispatch 830 */ 831 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) 832 { 833 struct flush_busy_ctx_data data = { 834 .hctx = hctx, 835 .list = list, 836 }; 837 838 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); 839 } 840 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); 841 842 static inline unsigned int queued_to_index(unsigned int queued) 843 { 844 if (!queued) 845 return 0; 846 847 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); 848 } 849 850 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, 851 bool wait) 852 { 853 struct blk_mq_alloc_data data = { 854 .q = rq->q, 855 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), 856 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT, 857 }; 858 859 if (rq->tag != -1) { 860 done: 861 if (hctx) 862 *hctx = data.hctx; 863 return true; 864 } 865 866 rq->tag = blk_mq_get_tag(&data); 867 if (rq->tag >= 0) { 868 if (blk_mq_tag_busy(data.hctx)) { 869 rq->rq_flags |= RQF_MQ_INFLIGHT; 870 atomic_inc(&data.hctx->nr_active); 871 } 872 data.hctx->tags->rqs[rq->tag] = rq; 873 goto done; 874 } 875 876 return false; 877 } 878 879 static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, 880 struct request *rq) 881 { 882 if (rq->tag == -1 || rq->internal_tag == -1) 883 return; 884 885 blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag); 886 rq->tag = -1; 887 888 if (rq->rq_flags & RQF_MQ_INFLIGHT) { 889 rq->rq_flags &= ~RQF_MQ_INFLIGHT; 890 atomic_dec(&hctx->nr_active); 891 } 892 } 893 894 /* 895 * If we fail getting a driver tag because all the driver tags are already 896 * assigned and on the dispatch list, BUT the first entry does not have a 897 * tag, then we could deadlock. For that case, move entries with assigned 898 * driver tags to the front, leaving the set of tagged requests in the 899 * same order, and the untagged set in the same order. 900 */ 901 static bool reorder_tags_to_front(struct list_head *list) 902 { 903 struct request *rq, *tmp, *first = NULL; 904 905 list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) { 906 if (rq == first) 907 break; 908 if (rq->tag != -1) { 909 list_move(&rq->queuelist, list); 910 if (!first) 911 first = rq; 912 } 913 } 914 915 return first != NULL; 916 } 917 918 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) 919 { 920 struct request_queue *q = hctx->queue; 921 struct request *rq; 922 LIST_HEAD(driver_list); 923 struct list_head *dptr; 924 int queued, ret = BLK_MQ_RQ_QUEUE_OK; 925 926 /* 927 * Start off with dptr being NULL, so we start the first request 928 * immediately, even if we have more pending. 929 */ 930 dptr = NULL; 931 932 /* 933 * Now process all the entries, sending them to the driver. 934 */ 935 queued = 0; 936 while (!list_empty(list)) { 937 struct blk_mq_queue_data bd; 938 939 rq = list_first_entry(list, struct request, queuelist); 940 if (!blk_mq_get_driver_tag(rq, &hctx, false)) { 941 if (!queued && reorder_tags_to_front(list)) 942 continue; 943 944 /* 945 * We failed getting a driver tag. Mark the queue(s) 946 * as needing a restart. Retry getting a tag again, 947 * in case the needed IO completed right before we 948 * marked the queue as needing a restart. 949 */ 950 blk_mq_sched_mark_restart(hctx); 951 if (!blk_mq_get_driver_tag(rq, &hctx, false)) 952 break; 953 } 954 list_del_init(&rq->queuelist); 955 956 bd.rq = rq; 957 bd.list = dptr; 958 bd.last = list_empty(list); 959 960 ret = q->mq_ops->queue_rq(hctx, &bd); 961 switch (ret) { 962 case BLK_MQ_RQ_QUEUE_OK: 963 queued++; 964 break; 965 case BLK_MQ_RQ_QUEUE_BUSY: 966 blk_mq_put_driver_tag(hctx, rq); 967 list_add(&rq->queuelist, list); 968 __blk_mq_requeue_request(rq); 969 break; 970 default: 971 pr_err("blk-mq: bad return on queue: %d\n", ret); 972 case BLK_MQ_RQ_QUEUE_ERROR: 973 rq->errors = -EIO; 974 blk_mq_end_request(rq, rq->errors); 975 break; 976 } 977 978 if (ret == BLK_MQ_RQ_QUEUE_BUSY) 979 break; 980 981 /* 982 * We've done the first request. If we have more than 1 983 * left in the list, set dptr to defer issue. 984 */ 985 if (!dptr && list->next != list->prev) 986 dptr = &driver_list; 987 } 988 989 hctx->dispatched[queued_to_index(queued)]++; 990 991 /* 992 * Any items that need requeuing? Stuff them into hctx->dispatch, 993 * that is where we will continue on next queue run. 994 */ 995 if (!list_empty(list)) { 996 spin_lock(&hctx->lock); 997 list_splice_init(list, &hctx->dispatch); 998 spin_unlock(&hctx->lock); 999 1000 /* 1001 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but 1002 * it's possible the queue is stopped and restarted again 1003 * before this. Queue restart will dispatch requests. And since 1004 * requests in rq_list aren't added into hctx->dispatch yet, 1005 * the requests in rq_list might get lost. 1006 * 1007 * blk_mq_run_hw_queue() already checks the STOPPED bit 1008 * 1009 * If RESTART is set, then let completion restart the queue 1010 * instead of potentially looping here. 1011 */ 1012 if (!blk_mq_sched_needs_restart(hctx)) 1013 blk_mq_run_hw_queue(hctx, true); 1014 } 1015 1016 return ret != BLK_MQ_RQ_QUEUE_BUSY; 1017 } 1018 1019 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1020 { 1021 int srcu_idx; 1022 1023 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && 1024 cpu_online(hctx->next_cpu)); 1025 1026 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 1027 rcu_read_lock(); 1028 blk_mq_sched_dispatch_requests(hctx); 1029 rcu_read_unlock(); 1030 } else { 1031 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); 1032 blk_mq_sched_dispatch_requests(hctx); 1033 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); 1034 } 1035 } 1036 1037 /* 1038 * It'd be great if the workqueue API had a way to pass 1039 * in a mask and had some smarts for more clever placement. 1040 * For now we just round-robin here, switching for every 1041 * BLK_MQ_CPU_WORK_BATCH queued items. 1042 */ 1043 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 1044 { 1045 if (hctx->queue->nr_hw_queues == 1) 1046 return WORK_CPU_UNBOUND; 1047 1048 if (--hctx->next_cpu_batch <= 0) { 1049 int next_cpu; 1050 1051 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); 1052 if (next_cpu >= nr_cpu_ids) 1053 next_cpu = cpumask_first(hctx->cpumask); 1054 1055 hctx->next_cpu = next_cpu; 1056 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 1057 } 1058 1059 return hctx->next_cpu; 1060 } 1061 1062 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1063 { 1064 if (unlikely(blk_mq_hctx_stopped(hctx) || 1065 !blk_mq_hw_queue_mapped(hctx))) 1066 return; 1067 1068 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 1069 int cpu = get_cpu(); 1070 if (cpumask_test_cpu(cpu, hctx->cpumask)) { 1071 __blk_mq_run_hw_queue(hctx); 1072 put_cpu(); 1073 return; 1074 } 1075 1076 put_cpu(); 1077 } 1078 1079 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); 1080 } 1081 1082 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 1083 { 1084 struct blk_mq_hw_ctx *hctx; 1085 int i; 1086 1087 queue_for_each_hw_ctx(q, hctx, i) { 1088 if (!blk_mq_hctx_has_pending(hctx) || 1089 blk_mq_hctx_stopped(hctx)) 1090 continue; 1091 1092 blk_mq_run_hw_queue(hctx, async); 1093 } 1094 } 1095 EXPORT_SYMBOL(blk_mq_run_hw_queues); 1096 1097 /** 1098 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 1099 * @q: request queue. 1100 * 1101 * The caller is responsible for serializing this function against 1102 * blk_mq_{start,stop}_hw_queue(). 1103 */ 1104 bool blk_mq_queue_stopped(struct request_queue *q) 1105 { 1106 struct blk_mq_hw_ctx *hctx; 1107 int i; 1108 1109 queue_for_each_hw_ctx(q, hctx, i) 1110 if (blk_mq_hctx_stopped(hctx)) 1111 return true; 1112 1113 return false; 1114 } 1115 EXPORT_SYMBOL(blk_mq_queue_stopped); 1116 1117 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 1118 { 1119 cancel_work(&hctx->run_work); 1120 cancel_delayed_work(&hctx->delay_work); 1121 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 1122 } 1123 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 1124 1125 void blk_mq_stop_hw_queues(struct request_queue *q) 1126 { 1127 struct blk_mq_hw_ctx *hctx; 1128 int i; 1129 1130 queue_for_each_hw_ctx(q, hctx, i) 1131 blk_mq_stop_hw_queue(hctx); 1132 } 1133 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 1134 1135 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 1136 { 1137 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1138 1139 blk_mq_run_hw_queue(hctx, false); 1140 } 1141 EXPORT_SYMBOL(blk_mq_start_hw_queue); 1142 1143 void blk_mq_start_hw_queues(struct request_queue *q) 1144 { 1145 struct blk_mq_hw_ctx *hctx; 1146 int i; 1147 1148 queue_for_each_hw_ctx(q, hctx, i) 1149 blk_mq_start_hw_queue(hctx); 1150 } 1151 EXPORT_SYMBOL(blk_mq_start_hw_queues); 1152 1153 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1154 { 1155 if (!blk_mq_hctx_stopped(hctx)) 1156 return; 1157 1158 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 1159 blk_mq_run_hw_queue(hctx, async); 1160 } 1161 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 1162 1163 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 1164 { 1165 struct blk_mq_hw_ctx *hctx; 1166 int i; 1167 1168 queue_for_each_hw_ctx(q, hctx, i) 1169 blk_mq_start_stopped_hw_queue(hctx, async); 1170 } 1171 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 1172 1173 static void blk_mq_run_work_fn(struct work_struct *work) 1174 { 1175 struct blk_mq_hw_ctx *hctx; 1176 1177 hctx = container_of(work, struct blk_mq_hw_ctx, run_work); 1178 1179 __blk_mq_run_hw_queue(hctx); 1180 } 1181 1182 static void blk_mq_delay_work_fn(struct work_struct *work) 1183 { 1184 struct blk_mq_hw_ctx *hctx; 1185 1186 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); 1187 1188 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) 1189 __blk_mq_run_hw_queue(hctx); 1190 } 1191 1192 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 1193 { 1194 if (unlikely(!blk_mq_hw_queue_mapped(hctx))) 1195 return; 1196 1197 blk_mq_stop_hw_queue(hctx); 1198 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), 1199 &hctx->delay_work, msecs_to_jiffies(msecs)); 1200 } 1201 EXPORT_SYMBOL(blk_mq_delay_queue); 1202 1203 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 1204 struct request *rq, 1205 bool at_head) 1206 { 1207 struct blk_mq_ctx *ctx = rq->mq_ctx; 1208 1209 trace_block_rq_insert(hctx->queue, rq); 1210 1211 if (at_head) 1212 list_add(&rq->queuelist, &ctx->rq_list); 1213 else 1214 list_add_tail(&rq->queuelist, &ctx->rq_list); 1215 } 1216 1217 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 1218 bool at_head) 1219 { 1220 struct blk_mq_ctx *ctx = rq->mq_ctx; 1221 1222 __blk_mq_insert_req_list(hctx, rq, at_head); 1223 blk_mq_hctx_mark_pending(hctx, ctx); 1224 } 1225 1226 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 1227 struct list_head *list) 1228 1229 { 1230 /* 1231 * preemption doesn't flush plug list, so it's possible ctx->cpu is 1232 * offline now 1233 */ 1234 spin_lock(&ctx->lock); 1235 while (!list_empty(list)) { 1236 struct request *rq; 1237 1238 rq = list_first_entry(list, struct request, queuelist); 1239 BUG_ON(rq->mq_ctx != ctx); 1240 list_del_init(&rq->queuelist); 1241 __blk_mq_insert_req_list(hctx, rq, false); 1242 } 1243 blk_mq_hctx_mark_pending(hctx, ctx); 1244 spin_unlock(&ctx->lock); 1245 } 1246 1247 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) 1248 { 1249 struct request *rqa = container_of(a, struct request, queuelist); 1250 struct request *rqb = container_of(b, struct request, queuelist); 1251 1252 return !(rqa->mq_ctx < rqb->mq_ctx || 1253 (rqa->mq_ctx == rqb->mq_ctx && 1254 blk_rq_pos(rqa) < blk_rq_pos(rqb))); 1255 } 1256 1257 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 1258 { 1259 struct blk_mq_ctx *this_ctx; 1260 struct request_queue *this_q; 1261 struct request *rq; 1262 LIST_HEAD(list); 1263 LIST_HEAD(ctx_list); 1264 unsigned int depth; 1265 1266 list_splice_init(&plug->mq_list, &list); 1267 1268 list_sort(NULL, &list, plug_ctx_cmp); 1269 1270 this_q = NULL; 1271 this_ctx = NULL; 1272 depth = 0; 1273 1274 while (!list_empty(&list)) { 1275 rq = list_entry_rq(list.next); 1276 list_del_init(&rq->queuelist); 1277 BUG_ON(!rq->q); 1278 if (rq->mq_ctx != this_ctx) { 1279 if (this_ctx) { 1280 trace_block_unplug(this_q, depth, from_schedule); 1281 blk_mq_sched_insert_requests(this_q, this_ctx, 1282 &ctx_list, 1283 from_schedule); 1284 } 1285 1286 this_ctx = rq->mq_ctx; 1287 this_q = rq->q; 1288 depth = 0; 1289 } 1290 1291 depth++; 1292 list_add_tail(&rq->queuelist, &ctx_list); 1293 } 1294 1295 /* 1296 * If 'this_ctx' is set, we know we have entries to complete 1297 * on 'ctx_list'. Do those. 1298 */ 1299 if (this_ctx) { 1300 trace_block_unplug(this_q, depth, from_schedule); 1301 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, 1302 from_schedule); 1303 } 1304 } 1305 1306 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) 1307 { 1308 init_request_from_bio(rq, bio); 1309 1310 blk_account_io_start(rq, true); 1311 } 1312 1313 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx) 1314 { 1315 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) && 1316 !blk_queue_nomerges(hctx->queue); 1317 } 1318 1319 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, 1320 struct blk_mq_ctx *ctx, 1321 struct request *rq, struct bio *bio) 1322 { 1323 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) { 1324 blk_mq_bio_to_request(rq, bio); 1325 spin_lock(&ctx->lock); 1326 insert_rq: 1327 __blk_mq_insert_request(hctx, rq, false); 1328 spin_unlock(&ctx->lock); 1329 return false; 1330 } else { 1331 struct request_queue *q = hctx->queue; 1332 1333 spin_lock(&ctx->lock); 1334 if (!blk_mq_attempt_merge(q, ctx, bio)) { 1335 blk_mq_bio_to_request(rq, bio); 1336 goto insert_rq; 1337 } 1338 1339 spin_unlock(&ctx->lock); 1340 __blk_mq_finish_request(hctx, ctx, rq); 1341 return true; 1342 } 1343 } 1344 1345 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) 1346 { 1347 if (rq->tag != -1) 1348 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false); 1349 1350 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); 1351 } 1352 1353 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie) 1354 { 1355 struct request_queue *q = rq->q; 1356 struct blk_mq_queue_data bd = { 1357 .rq = rq, 1358 .list = NULL, 1359 .last = 1 1360 }; 1361 struct blk_mq_hw_ctx *hctx; 1362 blk_qc_t new_cookie; 1363 int ret; 1364 1365 if (q->elevator) 1366 goto insert; 1367 1368 if (!blk_mq_get_driver_tag(rq, &hctx, false)) 1369 goto insert; 1370 1371 new_cookie = request_to_qc_t(hctx, rq); 1372 1373 /* 1374 * For OK queue, we are done. For error, kill it. Any other 1375 * error (busy), just add it to our list as we previously 1376 * would have done 1377 */ 1378 ret = q->mq_ops->queue_rq(hctx, &bd); 1379 if (ret == BLK_MQ_RQ_QUEUE_OK) { 1380 *cookie = new_cookie; 1381 return; 1382 } 1383 1384 __blk_mq_requeue_request(rq); 1385 1386 if (ret == BLK_MQ_RQ_QUEUE_ERROR) { 1387 *cookie = BLK_QC_T_NONE; 1388 rq->errors = -EIO; 1389 blk_mq_end_request(rq, rq->errors); 1390 return; 1391 } 1392 1393 insert: 1394 blk_mq_sched_insert_request(rq, false, true, true, false); 1395 } 1396 1397 /* 1398 * Multiple hardware queue variant. This will not use per-process plugs, 1399 * but will attempt to bypass the hctx queueing if we can go straight to 1400 * hardware for SYNC IO. 1401 */ 1402 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) 1403 { 1404 const int is_sync = op_is_sync(bio->bi_opf); 1405 const int is_flush_fua = op_is_flush(bio->bi_opf); 1406 struct blk_mq_alloc_data data = { .flags = 0 }; 1407 struct request *rq; 1408 unsigned int request_count = 0, srcu_idx; 1409 struct blk_plug *plug; 1410 struct request *same_queue_rq = NULL; 1411 blk_qc_t cookie; 1412 unsigned int wb_acct; 1413 1414 blk_queue_bounce(q, &bio); 1415 1416 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1417 bio_io_error(bio); 1418 return BLK_QC_T_NONE; 1419 } 1420 1421 blk_queue_split(q, &bio, q->bio_split); 1422 1423 if (!is_flush_fua && !blk_queue_nomerges(q) && 1424 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) 1425 return BLK_QC_T_NONE; 1426 1427 if (blk_mq_sched_bio_merge(q, bio)) 1428 return BLK_QC_T_NONE; 1429 1430 wb_acct = wbt_wait(q->rq_wb, bio, NULL); 1431 1432 trace_block_getrq(q, bio, bio->bi_opf); 1433 1434 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data); 1435 if (unlikely(!rq)) { 1436 __wbt_done(q->rq_wb, wb_acct); 1437 return BLK_QC_T_NONE; 1438 } 1439 1440 wbt_track(&rq->issue_stat, wb_acct); 1441 1442 cookie = request_to_qc_t(data.hctx, rq); 1443 1444 if (unlikely(is_flush_fua)) { 1445 blk_mq_put_ctx(data.ctx); 1446 blk_mq_bio_to_request(rq, bio); 1447 blk_mq_get_driver_tag(rq, NULL, true); 1448 blk_insert_flush(rq); 1449 blk_mq_run_hw_queue(data.hctx, true); 1450 goto done; 1451 } 1452 1453 plug = current->plug; 1454 /* 1455 * If the driver supports defer issued based on 'last', then 1456 * queue it up like normal since we can potentially save some 1457 * CPU this way. 1458 */ 1459 if (((plug && !blk_queue_nomerges(q)) || is_sync) && 1460 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) { 1461 struct request *old_rq = NULL; 1462 1463 blk_mq_bio_to_request(rq, bio); 1464 1465 /* 1466 * We do limited plugging. If the bio can be merged, do that. 1467 * Otherwise the existing request in the plug list will be 1468 * issued. So the plug list will have one request at most 1469 */ 1470 if (plug) { 1471 /* 1472 * The plug list might get flushed before this. If that 1473 * happens, same_queue_rq is invalid and plug list is 1474 * empty 1475 */ 1476 if (same_queue_rq && !list_empty(&plug->mq_list)) { 1477 old_rq = same_queue_rq; 1478 list_del_init(&old_rq->queuelist); 1479 } 1480 list_add_tail(&rq->queuelist, &plug->mq_list); 1481 } else /* is_sync */ 1482 old_rq = rq; 1483 blk_mq_put_ctx(data.ctx); 1484 if (!old_rq) 1485 goto done; 1486 1487 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) { 1488 rcu_read_lock(); 1489 blk_mq_try_issue_directly(old_rq, &cookie); 1490 rcu_read_unlock(); 1491 } else { 1492 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu); 1493 blk_mq_try_issue_directly(old_rq, &cookie); 1494 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx); 1495 } 1496 goto done; 1497 } 1498 1499 if (q->elevator) { 1500 blk_mq_put_ctx(data.ctx); 1501 blk_mq_bio_to_request(rq, bio); 1502 blk_mq_sched_insert_request(rq, false, true, 1503 !is_sync || is_flush_fua, true); 1504 goto done; 1505 } 1506 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { 1507 /* 1508 * For a SYNC request, send it to the hardware immediately. For 1509 * an ASYNC request, just ensure that we run it later on. The 1510 * latter allows for merging opportunities and more efficient 1511 * dispatching. 1512 */ 1513 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); 1514 } 1515 blk_mq_put_ctx(data.ctx); 1516 done: 1517 return cookie; 1518 } 1519 1520 /* 1521 * Single hardware queue variant. This will attempt to use any per-process 1522 * plug for merging and IO deferral. 1523 */ 1524 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) 1525 { 1526 const int is_sync = op_is_sync(bio->bi_opf); 1527 const int is_flush_fua = op_is_flush(bio->bi_opf); 1528 struct blk_plug *plug; 1529 unsigned int request_count = 0; 1530 struct blk_mq_alloc_data data = { .flags = 0 }; 1531 struct request *rq; 1532 blk_qc_t cookie; 1533 unsigned int wb_acct; 1534 1535 blk_queue_bounce(q, &bio); 1536 1537 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1538 bio_io_error(bio); 1539 return BLK_QC_T_NONE; 1540 } 1541 1542 blk_queue_split(q, &bio, q->bio_split); 1543 1544 if (!is_flush_fua && !blk_queue_nomerges(q)) { 1545 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) 1546 return BLK_QC_T_NONE; 1547 } else 1548 request_count = blk_plug_queued_count(q); 1549 1550 if (blk_mq_sched_bio_merge(q, bio)) 1551 return BLK_QC_T_NONE; 1552 1553 wb_acct = wbt_wait(q->rq_wb, bio, NULL); 1554 1555 trace_block_getrq(q, bio, bio->bi_opf); 1556 1557 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data); 1558 if (unlikely(!rq)) { 1559 __wbt_done(q->rq_wb, wb_acct); 1560 return BLK_QC_T_NONE; 1561 } 1562 1563 wbt_track(&rq->issue_stat, wb_acct); 1564 1565 cookie = request_to_qc_t(data.hctx, rq); 1566 1567 if (unlikely(is_flush_fua)) { 1568 blk_mq_put_ctx(data.ctx); 1569 blk_mq_bio_to_request(rq, bio); 1570 blk_mq_get_driver_tag(rq, NULL, true); 1571 blk_insert_flush(rq); 1572 blk_mq_run_hw_queue(data.hctx, true); 1573 goto done; 1574 } 1575 1576 /* 1577 * A task plug currently exists. Since this is completely lockless, 1578 * utilize that to temporarily store requests until the task is 1579 * either done or scheduled away. 1580 */ 1581 plug = current->plug; 1582 if (plug) { 1583 struct request *last = NULL; 1584 1585 blk_mq_bio_to_request(rq, bio); 1586 1587 /* 1588 * @request_count may become stale because of schedule 1589 * out, so check the list again. 1590 */ 1591 if (list_empty(&plug->mq_list)) 1592 request_count = 0; 1593 if (!request_count) 1594 trace_block_plug(q); 1595 else 1596 last = list_entry_rq(plug->mq_list.prev); 1597 1598 blk_mq_put_ctx(data.ctx); 1599 1600 if (request_count >= BLK_MAX_REQUEST_COUNT || (last && 1601 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 1602 blk_flush_plug_list(plug, false); 1603 trace_block_plug(q); 1604 } 1605 1606 list_add_tail(&rq->queuelist, &plug->mq_list); 1607 return cookie; 1608 } 1609 1610 if (q->elevator) { 1611 blk_mq_put_ctx(data.ctx); 1612 blk_mq_bio_to_request(rq, bio); 1613 blk_mq_sched_insert_request(rq, false, true, 1614 !is_sync || is_flush_fua, true); 1615 goto done; 1616 } 1617 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { 1618 /* 1619 * For a SYNC request, send it to the hardware immediately. For 1620 * an ASYNC request, just ensure that we run it later on. The 1621 * latter allows for merging opportunities and more efficient 1622 * dispatching. 1623 */ 1624 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); 1625 } 1626 1627 blk_mq_put_ctx(data.ctx); 1628 done: 1629 return cookie; 1630 } 1631 1632 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 1633 unsigned int hctx_idx) 1634 { 1635 struct page *page; 1636 1637 if (tags->rqs && set->ops->exit_request) { 1638 int i; 1639 1640 for (i = 0; i < tags->nr_tags; i++) { 1641 struct request *rq = tags->static_rqs[i]; 1642 1643 if (!rq) 1644 continue; 1645 set->ops->exit_request(set->driver_data, rq, 1646 hctx_idx, i); 1647 tags->static_rqs[i] = NULL; 1648 } 1649 } 1650 1651 while (!list_empty(&tags->page_list)) { 1652 page = list_first_entry(&tags->page_list, struct page, lru); 1653 list_del_init(&page->lru); 1654 /* 1655 * Remove kmemleak object previously allocated in 1656 * blk_mq_init_rq_map(). 1657 */ 1658 kmemleak_free(page_address(page)); 1659 __free_pages(page, page->private); 1660 } 1661 } 1662 1663 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 1664 { 1665 kfree(tags->rqs); 1666 tags->rqs = NULL; 1667 kfree(tags->static_rqs); 1668 tags->static_rqs = NULL; 1669 1670 blk_mq_free_tags(tags); 1671 } 1672 1673 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 1674 unsigned int hctx_idx, 1675 unsigned int nr_tags, 1676 unsigned int reserved_tags) 1677 { 1678 struct blk_mq_tags *tags; 1679 1680 tags = blk_mq_init_tags(nr_tags, reserved_tags, 1681 set->numa_node, 1682 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 1683 if (!tags) 1684 return NULL; 1685 1686 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *), 1687 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 1688 set->numa_node); 1689 if (!tags->rqs) { 1690 blk_mq_free_tags(tags); 1691 return NULL; 1692 } 1693 1694 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *), 1695 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 1696 set->numa_node); 1697 if (!tags->static_rqs) { 1698 kfree(tags->rqs); 1699 blk_mq_free_tags(tags); 1700 return NULL; 1701 } 1702 1703 return tags; 1704 } 1705 1706 static size_t order_to_size(unsigned int order) 1707 { 1708 return (size_t)PAGE_SIZE << order; 1709 } 1710 1711 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 1712 unsigned int hctx_idx, unsigned int depth) 1713 { 1714 unsigned int i, j, entries_per_page, max_order = 4; 1715 size_t rq_size, left; 1716 1717 INIT_LIST_HEAD(&tags->page_list); 1718 1719 /* 1720 * rq_size is the size of the request plus driver payload, rounded 1721 * to the cacheline size 1722 */ 1723 rq_size = round_up(sizeof(struct request) + set->cmd_size, 1724 cache_line_size()); 1725 left = rq_size * depth; 1726 1727 for (i = 0; i < depth; ) { 1728 int this_order = max_order; 1729 struct page *page; 1730 int to_do; 1731 void *p; 1732 1733 while (this_order && left < order_to_size(this_order - 1)) 1734 this_order--; 1735 1736 do { 1737 page = alloc_pages_node(set->numa_node, 1738 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 1739 this_order); 1740 if (page) 1741 break; 1742 if (!this_order--) 1743 break; 1744 if (order_to_size(this_order) < rq_size) 1745 break; 1746 } while (1); 1747 1748 if (!page) 1749 goto fail; 1750 1751 page->private = this_order; 1752 list_add_tail(&page->lru, &tags->page_list); 1753 1754 p = page_address(page); 1755 /* 1756 * Allow kmemleak to scan these pages as they contain pointers 1757 * to additional allocations like via ops->init_request(). 1758 */ 1759 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 1760 entries_per_page = order_to_size(this_order) / rq_size; 1761 to_do = min(entries_per_page, depth - i); 1762 left -= to_do * rq_size; 1763 for (j = 0; j < to_do; j++) { 1764 struct request *rq = p; 1765 1766 tags->static_rqs[i] = rq; 1767 if (set->ops->init_request) { 1768 if (set->ops->init_request(set->driver_data, 1769 rq, hctx_idx, i, 1770 set->numa_node)) { 1771 tags->static_rqs[i] = NULL; 1772 goto fail; 1773 } 1774 } 1775 1776 p += rq_size; 1777 i++; 1778 } 1779 } 1780 return 0; 1781 1782 fail: 1783 blk_mq_free_rqs(set, tags, hctx_idx); 1784 return -ENOMEM; 1785 } 1786 1787 /* 1788 * 'cpu' is going away. splice any existing rq_list entries from this 1789 * software queue to the hw queue dispatch list, and ensure that it 1790 * gets run. 1791 */ 1792 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 1793 { 1794 struct blk_mq_hw_ctx *hctx; 1795 struct blk_mq_ctx *ctx; 1796 LIST_HEAD(tmp); 1797 1798 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 1799 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 1800 1801 spin_lock(&ctx->lock); 1802 if (!list_empty(&ctx->rq_list)) { 1803 list_splice_init(&ctx->rq_list, &tmp); 1804 blk_mq_hctx_clear_pending(hctx, ctx); 1805 } 1806 spin_unlock(&ctx->lock); 1807 1808 if (list_empty(&tmp)) 1809 return 0; 1810 1811 spin_lock(&hctx->lock); 1812 list_splice_tail_init(&tmp, &hctx->dispatch); 1813 spin_unlock(&hctx->lock); 1814 1815 blk_mq_run_hw_queue(hctx, true); 1816 return 0; 1817 } 1818 1819 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 1820 { 1821 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 1822 &hctx->cpuhp_dead); 1823 } 1824 1825 /* hctx->ctxs will be freed in queue's release handler */ 1826 static void blk_mq_exit_hctx(struct request_queue *q, 1827 struct blk_mq_tag_set *set, 1828 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 1829 { 1830 unsigned flush_start_tag = set->queue_depth; 1831 1832 blk_mq_tag_idle(hctx); 1833 1834 if (set->ops->exit_request) 1835 set->ops->exit_request(set->driver_data, 1836 hctx->fq->flush_rq, hctx_idx, 1837 flush_start_tag + hctx_idx); 1838 1839 if (set->ops->exit_hctx) 1840 set->ops->exit_hctx(hctx, hctx_idx); 1841 1842 if (hctx->flags & BLK_MQ_F_BLOCKING) 1843 cleanup_srcu_struct(&hctx->queue_rq_srcu); 1844 1845 blk_mq_remove_cpuhp(hctx); 1846 blk_free_flush_queue(hctx->fq); 1847 sbitmap_free(&hctx->ctx_map); 1848 } 1849 1850 static void blk_mq_exit_hw_queues(struct request_queue *q, 1851 struct blk_mq_tag_set *set, int nr_queue) 1852 { 1853 struct blk_mq_hw_ctx *hctx; 1854 unsigned int i; 1855 1856 queue_for_each_hw_ctx(q, hctx, i) { 1857 if (i == nr_queue) 1858 break; 1859 blk_mq_exit_hctx(q, set, hctx, i); 1860 } 1861 } 1862 1863 static void blk_mq_free_hw_queues(struct request_queue *q, 1864 struct blk_mq_tag_set *set) 1865 { 1866 struct blk_mq_hw_ctx *hctx; 1867 unsigned int i; 1868 1869 queue_for_each_hw_ctx(q, hctx, i) 1870 free_cpumask_var(hctx->cpumask); 1871 } 1872 1873 static int blk_mq_init_hctx(struct request_queue *q, 1874 struct blk_mq_tag_set *set, 1875 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 1876 { 1877 int node; 1878 unsigned flush_start_tag = set->queue_depth; 1879 1880 node = hctx->numa_node; 1881 if (node == NUMA_NO_NODE) 1882 node = hctx->numa_node = set->numa_node; 1883 1884 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); 1885 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); 1886 spin_lock_init(&hctx->lock); 1887 INIT_LIST_HEAD(&hctx->dispatch); 1888 hctx->queue = q; 1889 hctx->queue_num = hctx_idx; 1890 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; 1891 1892 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 1893 1894 hctx->tags = set->tags[hctx_idx]; 1895 1896 /* 1897 * Allocate space for all possible cpus to avoid allocation at 1898 * runtime 1899 */ 1900 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), 1901 GFP_KERNEL, node); 1902 if (!hctx->ctxs) 1903 goto unregister_cpu_notifier; 1904 1905 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL, 1906 node)) 1907 goto free_ctxs; 1908 1909 hctx->nr_ctx = 0; 1910 1911 if (set->ops->init_hctx && 1912 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 1913 goto free_bitmap; 1914 1915 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); 1916 if (!hctx->fq) 1917 goto exit_hctx; 1918 1919 if (set->ops->init_request && 1920 set->ops->init_request(set->driver_data, 1921 hctx->fq->flush_rq, hctx_idx, 1922 flush_start_tag + hctx_idx, node)) 1923 goto free_fq; 1924 1925 if (hctx->flags & BLK_MQ_F_BLOCKING) 1926 init_srcu_struct(&hctx->queue_rq_srcu); 1927 1928 return 0; 1929 1930 free_fq: 1931 kfree(hctx->fq); 1932 exit_hctx: 1933 if (set->ops->exit_hctx) 1934 set->ops->exit_hctx(hctx, hctx_idx); 1935 free_bitmap: 1936 sbitmap_free(&hctx->ctx_map); 1937 free_ctxs: 1938 kfree(hctx->ctxs); 1939 unregister_cpu_notifier: 1940 blk_mq_remove_cpuhp(hctx); 1941 return -1; 1942 } 1943 1944 static void blk_mq_init_cpu_queues(struct request_queue *q, 1945 unsigned int nr_hw_queues) 1946 { 1947 unsigned int i; 1948 1949 for_each_possible_cpu(i) { 1950 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 1951 struct blk_mq_hw_ctx *hctx; 1952 1953 memset(__ctx, 0, sizeof(*__ctx)); 1954 __ctx->cpu = i; 1955 spin_lock_init(&__ctx->lock); 1956 INIT_LIST_HEAD(&__ctx->rq_list); 1957 __ctx->queue = q; 1958 blk_stat_init(&__ctx->stat[BLK_STAT_READ]); 1959 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]); 1960 1961 /* If the cpu isn't online, the cpu is mapped to first hctx */ 1962 if (!cpu_online(i)) 1963 continue; 1964 1965 hctx = blk_mq_map_queue(q, i); 1966 1967 /* 1968 * Set local node, IFF we have more than one hw queue. If 1969 * not, we remain on the home node of the device 1970 */ 1971 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 1972 hctx->numa_node = local_memory_node(cpu_to_node(i)); 1973 } 1974 } 1975 1976 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx) 1977 { 1978 int ret = 0; 1979 1980 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, 1981 set->queue_depth, set->reserved_tags); 1982 if (!set->tags[hctx_idx]) 1983 return false; 1984 1985 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, 1986 set->queue_depth); 1987 if (!ret) 1988 return true; 1989 1990 blk_mq_free_rq_map(set->tags[hctx_idx]); 1991 set->tags[hctx_idx] = NULL; 1992 return false; 1993 } 1994 1995 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, 1996 unsigned int hctx_idx) 1997 { 1998 if (set->tags[hctx_idx]) { 1999 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); 2000 blk_mq_free_rq_map(set->tags[hctx_idx]); 2001 set->tags[hctx_idx] = NULL; 2002 } 2003 } 2004 2005 static void blk_mq_map_swqueue(struct request_queue *q, 2006 const struct cpumask *online_mask) 2007 { 2008 unsigned int i, hctx_idx; 2009 struct blk_mq_hw_ctx *hctx; 2010 struct blk_mq_ctx *ctx; 2011 struct blk_mq_tag_set *set = q->tag_set; 2012 2013 /* 2014 * Avoid others reading imcomplete hctx->cpumask through sysfs 2015 */ 2016 mutex_lock(&q->sysfs_lock); 2017 2018 queue_for_each_hw_ctx(q, hctx, i) { 2019 cpumask_clear(hctx->cpumask); 2020 hctx->nr_ctx = 0; 2021 } 2022 2023 /* 2024 * Map software to hardware queues 2025 */ 2026 for_each_possible_cpu(i) { 2027 /* If the cpu isn't online, the cpu is mapped to first hctx */ 2028 if (!cpumask_test_cpu(i, online_mask)) 2029 continue; 2030 2031 hctx_idx = q->mq_map[i]; 2032 /* unmapped hw queue can be remapped after CPU topo changed */ 2033 if (!set->tags[hctx_idx] && 2034 !__blk_mq_alloc_rq_map(set, hctx_idx)) { 2035 /* 2036 * If tags initialization fail for some hctx, 2037 * that hctx won't be brought online. In this 2038 * case, remap the current ctx to hctx[0] which 2039 * is guaranteed to always have tags allocated 2040 */ 2041 q->mq_map[i] = 0; 2042 } 2043 2044 ctx = per_cpu_ptr(q->queue_ctx, i); 2045 hctx = blk_mq_map_queue(q, i); 2046 2047 cpumask_set_cpu(i, hctx->cpumask); 2048 ctx->index_hw = hctx->nr_ctx; 2049 hctx->ctxs[hctx->nr_ctx++] = ctx; 2050 } 2051 2052 mutex_unlock(&q->sysfs_lock); 2053 2054 queue_for_each_hw_ctx(q, hctx, i) { 2055 /* 2056 * If no software queues are mapped to this hardware queue, 2057 * disable it and free the request entries. 2058 */ 2059 if (!hctx->nr_ctx) { 2060 /* Never unmap queue 0. We need it as a 2061 * fallback in case of a new remap fails 2062 * allocation 2063 */ 2064 if (i && set->tags[i]) 2065 blk_mq_free_map_and_requests(set, i); 2066 2067 hctx->tags = NULL; 2068 continue; 2069 } 2070 2071 hctx->tags = set->tags[i]; 2072 WARN_ON(!hctx->tags); 2073 2074 /* 2075 * Set the map size to the number of mapped software queues. 2076 * This is more accurate and more efficient than looping 2077 * over all possibly mapped software queues. 2078 */ 2079 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 2080 2081 /* 2082 * Initialize batch roundrobin counts 2083 */ 2084 hctx->next_cpu = cpumask_first(hctx->cpumask); 2085 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2086 } 2087 } 2088 2089 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2090 { 2091 struct blk_mq_hw_ctx *hctx; 2092 int i; 2093 2094 queue_for_each_hw_ctx(q, hctx, i) { 2095 if (shared) 2096 hctx->flags |= BLK_MQ_F_TAG_SHARED; 2097 else 2098 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; 2099 } 2100 } 2101 2102 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) 2103 { 2104 struct request_queue *q; 2105 2106 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2107 blk_mq_freeze_queue(q); 2108 queue_set_hctx_shared(q, shared); 2109 blk_mq_unfreeze_queue(q); 2110 } 2111 } 2112 2113 static void blk_mq_del_queue_tag_set(struct request_queue *q) 2114 { 2115 struct blk_mq_tag_set *set = q->tag_set; 2116 2117 mutex_lock(&set->tag_list_lock); 2118 list_del_init(&q->tag_set_list); 2119 if (list_is_singular(&set->tag_list)) { 2120 /* just transitioned to unshared */ 2121 set->flags &= ~BLK_MQ_F_TAG_SHARED; 2122 /* update existing queue */ 2123 blk_mq_update_tag_set_depth(set, false); 2124 } 2125 mutex_unlock(&set->tag_list_lock); 2126 } 2127 2128 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 2129 struct request_queue *q) 2130 { 2131 q->tag_set = set; 2132 2133 mutex_lock(&set->tag_list_lock); 2134 2135 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */ 2136 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) { 2137 set->flags |= BLK_MQ_F_TAG_SHARED; 2138 /* update existing queue */ 2139 blk_mq_update_tag_set_depth(set, true); 2140 } 2141 if (set->flags & BLK_MQ_F_TAG_SHARED) 2142 queue_set_hctx_shared(q, true); 2143 list_add_tail(&q->tag_set_list, &set->tag_list); 2144 2145 mutex_unlock(&set->tag_list_lock); 2146 } 2147 2148 /* 2149 * It is the actual release handler for mq, but we do it from 2150 * request queue's release handler for avoiding use-after-free 2151 * and headache because q->mq_kobj shouldn't have been introduced, 2152 * but we can't group ctx/kctx kobj without it. 2153 */ 2154 void blk_mq_release(struct request_queue *q) 2155 { 2156 struct blk_mq_hw_ctx *hctx; 2157 unsigned int i; 2158 2159 blk_mq_sched_teardown(q); 2160 2161 /* hctx kobj stays in hctx */ 2162 queue_for_each_hw_ctx(q, hctx, i) { 2163 if (!hctx) 2164 continue; 2165 kfree(hctx->ctxs); 2166 kfree(hctx); 2167 } 2168 2169 q->mq_map = NULL; 2170 2171 kfree(q->queue_hw_ctx); 2172 2173 /* ctx kobj stays in queue_ctx */ 2174 free_percpu(q->queue_ctx); 2175 } 2176 2177 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 2178 { 2179 struct request_queue *uninit_q, *q; 2180 2181 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); 2182 if (!uninit_q) 2183 return ERR_PTR(-ENOMEM); 2184 2185 q = blk_mq_init_allocated_queue(set, uninit_q); 2186 if (IS_ERR(q)) 2187 blk_cleanup_queue(uninit_q); 2188 2189 return q; 2190 } 2191 EXPORT_SYMBOL(blk_mq_init_queue); 2192 2193 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 2194 struct request_queue *q) 2195 { 2196 int i, j; 2197 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 2198 2199 blk_mq_sysfs_unregister(q); 2200 for (i = 0; i < set->nr_hw_queues; i++) { 2201 int node; 2202 2203 if (hctxs[i]) 2204 continue; 2205 2206 node = blk_mq_hw_queue_to_node(q->mq_map, i); 2207 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), 2208 GFP_KERNEL, node); 2209 if (!hctxs[i]) 2210 break; 2211 2212 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL, 2213 node)) { 2214 kfree(hctxs[i]); 2215 hctxs[i] = NULL; 2216 break; 2217 } 2218 2219 atomic_set(&hctxs[i]->nr_active, 0); 2220 hctxs[i]->numa_node = node; 2221 hctxs[i]->queue_num = i; 2222 2223 if (blk_mq_init_hctx(q, set, hctxs[i], i)) { 2224 free_cpumask_var(hctxs[i]->cpumask); 2225 kfree(hctxs[i]); 2226 hctxs[i] = NULL; 2227 break; 2228 } 2229 blk_mq_hctx_kobj_init(hctxs[i]); 2230 } 2231 for (j = i; j < q->nr_hw_queues; j++) { 2232 struct blk_mq_hw_ctx *hctx = hctxs[j]; 2233 2234 if (hctx) { 2235 if (hctx->tags) 2236 blk_mq_free_map_and_requests(set, j); 2237 blk_mq_exit_hctx(q, set, hctx, j); 2238 free_cpumask_var(hctx->cpumask); 2239 kobject_put(&hctx->kobj); 2240 kfree(hctx->ctxs); 2241 kfree(hctx); 2242 hctxs[j] = NULL; 2243 2244 } 2245 } 2246 q->nr_hw_queues = i; 2247 blk_mq_sysfs_register(q); 2248 } 2249 2250 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 2251 struct request_queue *q) 2252 { 2253 /* mark the queue as mq asap */ 2254 q->mq_ops = set->ops; 2255 2256 q->queue_ctx = alloc_percpu(struct blk_mq_ctx); 2257 if (!q->queue_ctx) 2258 goto err_exit; 2259 2260 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), 2261 GFP_KERNEL, set->numa_node); 2262 if (!q->queue_hw_ctx) 2263 goto err_percpu; 2264 2265 q->mq_map = set->mq_map; 2266 2267 blk_mq_realloc_hw_ctxs(set, q); 2268 if (!q->nr_hw_queues) 2269 goto err_hctxs; 2270 2271 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 2272 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 2273 2274 q->nr_queues = nr_cpu_ids; 2275 2276 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 2277 2278 if (!(set->flags & BLK_MQ_F_SG_MERGE)) 2279 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; 2280 2281 q->sg_reserved_size = INT_MAX; 2282 2283 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 2284 INIT_LIST_HEAD(&q->requeue_list); 2285 spin_lock_init(&q->requeue_lock); 2286 2287 if (q->nr_hw_queues > 1) 2288 blk_queue_make_request(q, blk_mq_make_request); 2289 else 2290 blk_queue_make_request(q, blk_sq_make_request); 2291 2292 /* 2293 * Do this after blk_queue_make_request() overrides it... 2294 */ 2295 q->nr_requests = set->queue_depth; 2296 2297 /* 2298 * Default to classic polling 2299 */ 2300 q->poll_nsec = -1; 2301 2302 if (set->ops->complete) 2303 blk_queue_softirq_done(q, set->ops->complete); 2304 2305 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 2306 2307 get_online_cpus(); 2308 mutex_lock(&all_q_mutex); 2309 2310 list_add_tail(&q->all_q_node, &all_q_list); 2311 blk_mq_add_queue_tag_set(set, q); 2312 blk_mq_map_swqueue(q, cpu_online_mask); 2313 2314 mutex_unlock(&all_q_mutex); 2315 put_online_cpus(); 2316 2317 if (!(set->flags & BLK_MQ_F_NO_SCHED)) { 2318 int ret; 2319 2320 ret = blk_mq_sched_init(q); 2321 if (ret) 2322 return ERR_PTR(ret); 2323 } 2324 2325 return q; 2326 2327 err_hctxs: 2328 kfree(q->queue_hw_ctx); 2329 err_percpu: 2330 free_percpu(q->queue_ctx); 2331 err_exit: 2332 q->mq_ops = NULL; 2333 return ERR_PTR(-ENOMEM); 2334 } 2335 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 2336 2337 void blk_mq_free_queue(struct request_queue *q) 2338 { 2339 struct blk_mq_tag_set *set = q->tag_set; 2340 2341 mutex_lock(&all_q_mutex); 2342 list_del_init(&q->all_q_node); 2343 mutex_unlock(&all_q_mutex); 2344 2345 wbt_exit(q); 2346 2347 blk_mq_del_queue_tag_set(q); 2348 2349 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 2350 blk_mq_free_hw_queues(q, set); 2351 } 2352 2353 /* Basically redo blk_mq_init_queue with queue frozen */ 2354 static void blk_mq_queue_reinit(struct request_queue *q, 2355 const struct cpumask *online_mask) 2356 { 2357 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); 2358 2359 blk_mq_sysfs_unregister(q); 2360 2361 /* 2362 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe 2363 * we should change hctx numa_node according to new topology (this 2364 * involves free and re-allocate memory, worthy doing?) 2365 */ 2366 2367 blk_mq_map_swqueue(q, online_mask); 2368 2369 blk_mq_sysfs_register(q); 2370 } 2371 2372 /* 2373 * New online cpumask which is going to be set in this hotplug event. 2374 * Declare this cpumasks as global as cpu-hotplug operation is invoked 2375 * one-by-one and dynamically allocating this could result in a failure. 2376 */ 2377 static struct cpumask cpuhp_online_new; 2378 2379 static void blk_mq_queue_reinit_work(void) 2380 { 2381 struct request_queue *q; 2382 2383 mutex_lock(&all_q_mutex); 2384 /* 2385 * We need to freeze and reinit all existing queues. Freezing 2386 * involves synchronous wait for an RCU grace period and doing it 2387 * one by one may take a long time. Start freezing all queues in 2388 * one swoop and then wait for the completions so that freezing can 2389 * take place in parallel. 2390 */ 2391 list_for_each_entry(q, &all_q_list, all_q_node) 2392 blk_mq_freeze_queue_start(q); 2393 list_for_each_entry(q, &all_q_list, all_q_node) 2394 blk_mq_freeze_queue_wait(q); 2395 2396 list_for_each_entry(q, &all_q_list, all_q_node) 2397 blk_mq_queue_reinit(q, &cpuhp_online_new); 2398 2399 list_for_each_entry(q, &all_q_list, all_q_node) 2400 blk_mq_unfreeze_queue(q); 2401 2402 mutex_unlock(&all_q_mutex); 2403 } 2404 2405 static int blk_mq_queue_reinit_dead(unsigned int cpu) 2406 { 2407 cpumask_copy(&cpuhp_online_new, cpu_online_mask); 2408 blk_mq_queue_reinit_work(); 2409 return 0; 2410 } 2411 2412 /* 2413 * Before hotadded cpu starts handling requests, new mappings must be 2414 * established. Otherwise, these requests in hw queue might never be 2415 * dispatched. 2416 * 2417 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0 2418 * for CPU0, and ctx1 for CPU1). 2419 * 2420 * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list 2421 * and set bit0 in pending bitmap as ctx1->index_hw is still zero. 2422 * 2423 * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set 2424 * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list. 2425 * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is 2426 * ignored. 2427 */ 2428 static int blk_mq_queue_reinit_prepare(unsigned int cpu) 2429 { 2430 cpumask_copy(&cpuhp_online_new, cpu_online_mask); 2431 cpumask_set_cpu(cpu, &cpuhp_online_new); 2432 blk_mq_queue_reinit_work(); 2433 return 0; 2434 } 2435 2436 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 2437 { 2438 int i; 2439 2440 for (i = 0; i < set->nr_hw_queues; i++) 2441 if (!__blk_mq_alloc_rq_map(set, i)) 2442 goto out_unwind; 2443 2444 return 0; 2445 2446 out_unwind: 2447 while (--i >= 0) 2448 blk_mq_free_rq_map(set->tags[i]); 2449 2450 return -ENOMEM; 2451 } 2452 2453 /* 2454 * Allocate the request maps associated with this tag_set. Note that this 2455 * may reduce the depth asked for, if memory is tight. set->queue_depth 2456 * will be updated to reflect the allocated depth. 2457 */ 2458 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 2459 { 2460 unsigned int depth; 2461 int err; 2462 2463 depth = set->queue_depth; 2464 do { 2465 err = __blk_mq_alloc_rq_maps(set); 2466 if (!err) 2467 break; 2468 2469 set->queue_depth >>= 1; 2470 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 2471 err = -ENOMEM; 2472 break; 2473 } 2474 } while (set->queue_depth); 2475 2476 if (!set->queue_depth || err) { 2477 pr_err("blk-mq: failed to allocate request map\n"); 2478 return -ENOMEM; 2479 } 2480 2481 if (depth != set->queue_depth) 2482 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 2483 depth, set->queue_depth); 2484 2485 return 0; 2486 } 2487 2488 /* 2489 * Alloc a tag set to be associated with one or more request queues. 2490 * May fail with EINVAL for various error conditions. May adjust the 2491 * requested depth down, if if it too large. In that case, the set 2492 * value will be stored in set->queue_depth. 2493 */ 2494 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 2495 { 2496 int ret; 2497 2498 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 2499 2500 if (!set->nr_hw_queues) 2501 return -EINVAL; 2502 if (!set->queue_depth) 2503 return -EINVAL; 2504 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 2505 return -EINVAL; 2506 2507 if (!set->ops->queue_rq) 2508 return -EINVAL; 2509 2510 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 2511 pr_info("blk-mq: reduced tag depth to %u\n", 2512 BLK_MQ_MAX_DEPTH); 2513 set->queue_depth = BLK_MQ_MAX_DEPTH; 2514 } 2515 2516 /* 2517 * If a crashdump is active, then we are potentially in a very 2518 * memory constrained environment. Limit us to 1 queue and 2519 * 64 tags to prevent using too much memory. 2520 */ 2521 if (is_kdump_kernel()) { 2522 set->nr_hw_queues = 1; 2523 set->queue_depth = min(64U, set->queue_depth); 2524 } 2525 /* 2526 * There is no use for more h/w queues than cpus. 2527 */ 2528 if (set->nr_hw_queues > nr_cpu_ids) 2529 set->nr_hw_queues = nr_cpu_ids; 2530 2531 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *), 2532 GFP_KERNEL, set->numa_node); 2533 if (!set->tags) 2534 return -ENOMEM; 2535 2536 ret = -ENOMEM; 2537 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids, 2538 GFP_KERNEL, set->numa_node); 2539 if (!set->mq_map) 2540 goto out_free_tags; 2541 2542 if (set->ops->map_queues) 2543 ret = set->ops->map_queues(set); 2544 else 2545 ret = blk_mq_map_queues(set); 2546 if (ret) 2547 goto out_free_mq_map; 2548 2549 ret = blk_mq_alloc_rq_maps(set); 2550 if (ret) 2551 goto out_free_mq_map; 2552 2553 mutex_init(&set->tag_list_lock); 2554 INIT_LIST_HEAD(&set->tag_list); 2555 2556 return 0; 2557 2558 out_free_mq_map: 2559 kfree(set->mq_map); 2560 set->mq_map = NULL; 2561 out_free_tags: 2562 kfree(set->tags); 2563 set->tags = NULL; 2564 return ret; 2565 } 2566 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 2567 2568 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 2569 { 2570 int i; 2571 2572 for (i = 0; i < nr_cpu_ids; i++) 2573 blk_mq_free_map_and_requests(set, i); 2574 2575 kfree(set->mq_map); 2576 set->mq_map = NULL; 2577 2578 kfree(set->tags); 2579 set->tags = NULL; 2580 } 2581 EXPORT_SYMBOL(blk_mq_free_tag_set); 2582 2583 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 2584 { 2585 struct blk_mq_tag_set *set = q->tag_set; 2586 struct blk_mq_hw_ctx *hctx; 2587 int i, ret; 2588 2589 if (!set) 2590 return -EINVAL; 2591 2592 blk_mq_freeze_queue(q); 2593 blk_mq_quiesce_queue(q); 2594 2595 ret = 0; 2596 queue_for_each_hw_ctx(q, hctx, i) { 2597 if (!hctx->tags) 2598 continue; 2599 /* 2600 * If we're using an MQ scheduler, just update the scheduler 2601 * queue depth. This is similar to what the old code would do. 2602 */ 2603 if (!hctx->sched_tags) { 2604 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, 2605 min(nr, set->queue_depth), 2606 false); 2607 } else { 2608 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 2609 nr, true); 2610 } 2611 if (ret) 2612 break; 2613 } 2614 2615 if (!ret) 2616 q->nr_requests = nr; 2617 2618 blk_mq_unfreeze_queue(q); 2619 blk_mq_start_stopped_hw_queues(q, true); 2620 2621 return ret; 2622 } 2623 2624 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 2625 { 2626 struct request_queue *q; 2627 2628 if (nr_hw_queues > nr_cpu_ids) 2629 nr_hw_queues = nr_cpu_ids; 2630 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues) 2631 return; 2632 2633 list_for_each_entry(q, &set->tag_list, tag_set_list) 2634 blk_mq_freeze_queue(q); 2635 2636 set->nr_hw_queues = nr_hw_queues; 2637 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2638 blk_mq_realloc_hw_ctxs(set, q); 2639 2640 if (q->nr_hw_queues > 1) 2641 blk_queue_make_request(q, blk_mq_make_request); 2642 else 2643 blk_queue_make_request(q, blk_sq_make_request); 2644 2645 blk_mq_queue_reinit(q, cpu_online_mask); 2646 } 2647 2648 list_for_each_entry(q, &set->tag_list, tag_set_list) 2649 blk_mq_unfreeze_queue(q); 2650 } 2651 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 2652 2653 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, 2654 struct blk_mq_hw_ctx *hctx, 2655 struct request *rq) 2656 { 2657 struct blk_rq_stat stat[2]; 2658 unsigned long ret = 0; 2659 2660 /* 2661 * If stats collection isn't on, don't sleep but turn it on for 2662 * future users 2663 */ 2664 if (!blk_stat_enable(q)) 2665 return 0; 2666 2667 /* 2668 * We don't have to do this once per IO, should optimize this 2669 * to just use the current window of stats until it changes 2670 */ 2671 memset(&stat, 0, sizeof(stat)); 2672 blk_hctx_stat_get(hctx, stat); 2673 2674 /* 2675 * As an optimistic guess, use half of the mean service time 2676 * for this type of request. We can (and should) make this smarter. 2677 * For instance, if the completion latencies are tight, we can 2678 * get closer than just half the mean. This is especially 2679 * important on devices where the completion latencies are longer 2680 * than ~10 usec. 2681 */ 2682 if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples) 2683 ret = (stat[BLK_STAT_READ].mean + 1) / 2; 2684 else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples) 2685 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2; 2686 2687 return ret; 2688 } 2689 2690 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, 2691 struct blk_mq_hw_ctx *hctx, 2692 struct request *rq) 2693 { 2694 struct hrtimer_sleeper hs; 2695 enum hrtimer_mode mode; 2696 unsigned int nsecs; 2697 ktime_t kt; 2698 2699 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags)) 2700 return false; 2701 2702 /* 2703 * poll_nsec can be: 2704 * 2705 * -1: don't ever hybrid sleep 2706 * 0: use half of prev avg 2707 * >0: use this specific value 2708 */ 2709 if (q->poll_nsec == -1) 2710 return false; 2711 else if (q->poll_nsec > 0) 2712 nsecs = q->poll_nsec; 2713 else 2714 nsecs = blk_mq_poll_nsecs(q, hctx, rq); 2715 2716 if (!nsecs) 2717 return false; 2718 2719 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags); 2720 2721 /* 2722 * This will be replaced with the stats tracking code, using 2723 * 'avg_completion_time / 2' as the pre-sleep target. 2724 */ 2725 kt = nsecs; 2726 2727 mode = HRTIMER_MODE_REL; 2728 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode); 2729 hrtimer_set_expires(&hs.timer, kt); 2730 2731 hrtimer_init_sleeper(&hs, current); 2732 do { 2733 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) 2734 break; 2735 set_current_state(TASK_UNINTERRUPTIBLE); 2736 hrtimer_start_expires(&hs.timer, mode); 2737 if (hs.task) 2738 io_schedule(); 2739 hrtimer_cancel(&hs.timer); 2740 mode = HRTIMER_MODE_ABS; 2741 } while (hs.task && !signal_pending(current)); 2742 2743 __set_current_state(TASK_RUNNING); 2744 destroy_hrtimer_on_stack(&hs.timer); 2745 return true; 2746 } 2747 2748 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq) 2749 { 2750 struct request_queue *q = hctx->queue; 2751 long state; 2752 2753 /* 2754 * If we sleep, have the caller restart the poll loop to reset 2755 * the state. Like for the other success return cases, the 2756 * caller is responsible for checking if the IO completed. If 2757 * the IO isn't complete, we'll get called again and will go 2758 * straight to the busy poll loop. 2759 */ 2760 if (blk_mq_poll_hybrid_sleep(q, hctx, rq)) 2761 return true; 2762 2763 hctx->poll_considered++; 2764 2765 state = current->state; 2766 while (!need_resched()) { 2767 int ret; 2768 2769 hctx->poll_invoked++; 2770 2771 ret = q->mq_ops->poll(hctx, rq->tag); 2772 if (ret > 0) { 2773 hctx->poll_success++; 2774 set_current_state(TASK_RUNNING); 2775 return true; 2776 } 2777 2778 if (signal_pending_state(state, current)) 2779 set_current_state(TASK_RUNNING); 2780 2781 if (current->state == TASK_RUNNING) 2782 return true; 2783 if (ret < 0) 2784 break; 2785 cpu_relax(); 2786 } 2787 2788 return false; 2789 } 2790 2791 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie) 2792 { 2793 struct blk_mq_hw_ctx *hctx; 2794 struct blk_plug *plug; 2795 struct request *rq; 2796 2797 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) || 2798 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) 2799 return false; 2800 2801 plug = current->plug; 2802 if (plug) 2803 blk_flush_plug_list(plug, false); 2804 2805 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; 2806 if (!blk_qc_t_is_internal(cookie)) 2807 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); 2808 else 2809 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); 2810 2811 return __blk_mq_poll(hctx, rq); 2812 } 2813 EXPORT_SYMBOL_GPL(blk_mq_poll); 2814 2815 void blk_mq_disable_hotplug(void) 2816 { 2817 mutex_lock(&all_q_mutex); 2818 } 2819 2820 void blk_mq_enable_hotplug(void) 2821 { 2822 mutex_unlock(&all_q_mutex); 2823 } 2824 2825 static int __init blk_mq_init(void) 2826 { 2827 blk_mq_debugfs_init(); 2828 2829 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 2830 blk_mq_hctx_notify_dead); 2831 2832 cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare", 2833 blk_mq_queue_reinit_prepare, 2834 blk_mq_queue_reinit_dead); 2835 return 0; 2836 } 2837 subsys_initcall(blk_mq_init); 2838