/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
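
/*
 * A worked example of the bucket mapping above (for illustration only):
 * the index interleaves data direction and request size. A 4096-byte READ
 * has ddir == 0 and ilog2(4096) == 12, so it lands in bucket
 * 0 + 2 * (12 - 9) = 6, while the same-sized WRITE lands in bucket 7.
 * Requests smaller than 512 bytes produce a negative index and are not
 * accounted; oversized requests are clamped to the last bucket pair.
 */
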
/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map) ||
			!list_empty_careful(&hctx->dispatch) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
	    !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
		/*
		 * index[0] counts the specific partition that was asked
		 * for. index[1] counts the ones that are active on the
		 * whole device, so increment that if mi->part is indeed
		 * a partition, and not a whole device.
		 */
		if (rq->part == mi->part)
			mi->inflight[0]++;
		if (mi->part->partno)
			mi->inflight[1]++;
	}
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
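
/*
 * A minimal usage sketch (illustrative only, not used by blk-mq itself):
 * callers typically bracket updates to queue data structures with a
 * freeze/unfreeze pair so that no request is in flight while the change
 * is made. The function below is hypothetical.
 */
static inline void example_update_queue_limits(struct request_queue *q,
					       unsigned int new_max_sectors)
{
	blk_mq_freeze_queue(q);		/* wait for q_usage_counter to reach zero */
	blk_queue_max_hw_sectors(q, new_max_sectors);
	blk_mq_unfreeze_queue(q);	/* let new requests enter the queue again */
}
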
/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, it is
 * guaranteed that no dispatch can happen until the queue is unquiesced
 * via blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];

	rq->rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq->rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->cmd_flags = op;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	struct blk_mq_ctx *local_ctx = NULL;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx))
		data->ctx = local_ctx = blk_mq_get_ctx(q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (local_ctx) {
			blk_mq_put_ctx(local_ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
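
/*
 * Usage sketch (illustrative only): a driver can allocate a request
 * outside the normal bio path, e.g. for an internal or passthrough
 * command. With BLK_MQ_REQ_NOWAIT the call fails with -EWOULDBLOCK
 * instead of sleeping when no tag is available. The helper name below is
 * hypothetical.
 */
static inline struct request *example_alloc_internal_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return NULL;		/* typically -EWOULDBLOCK: no tag free */
	return rq;			/* caller ends it with blk_mq_free_request() */
}
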
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		unsigned int op, unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	wbt_done(q->rq_wb, &rq->issue_stat);

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq);
	}

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);
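
/*
 * Sketch of the driver side of the start/complete contract (illustrative
 * only, hypothetical driver): ->queue_rq() calls blk_mq_start_request()
 * before issuing the command to hardware, and the driver's completion
 * path (usually an interrupt handler) later calls
 * blk_mq_complete_request() on the same request.
 */
static blk_status_t __maybe_unused example_queue_rq(struct blk_mq_hw_ctx *hctx,
						    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);	/* arm the timeout, set REQ_ATOM_STARTED */
	/* ... hand rq to the hardware here ... */
	return BLK_STS_OK;		/* completion arrives asynchronously */
}

static void __maybe_unused example_irq_completion(struct request *rq)
{
	blk_mq_complete_request(rq);	/* runs q->softirq_done_fn, possibly via IPI */
}
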
/*
 * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be a race with the timeout handler.
 * But given that rq->deadline has just been set in .queue_rq() in this
 * situation, the race isn't possible in practice, because rq->timeout
 * should be large enough to cover the window between
 * blk_mq_start_request() being called from .queue_rq() and
 * REQ_ATOM_STARTED being cleared here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
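
/*
 * Usage sketch (illustrative only, hypothetical helper): when a device
 * reports a transient failure, the driver can hand a started request
 * back to blk-mq and have it re-dispatched a little later via the
 * requeue list.
 */
static inline void example_retry_later(struct request *rq)
{
	blk_mq_requeue_request(rq, false);		/* park rq on q->requeue_list */
	blk_mq_delay_kick_requeue_list(rq->q, 100);	/* re-run the list in ~100 ms */
}
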
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	/*
	 * The rq being checked may already have been freed and reallocated
	 * here; we avoid that race by checking rq->deadline and the
	 * REQ_ATOM_COMPLETE flag together:
	 *
	 * - if rq->deadline is observed as the new value because of
	 *   reuse, the rq won't be timed out because the new deadline
	 *   has not expired yet.
	 * - if rq->deadline is observed as the previous value, the
	 *   REQ_ATOM_COMPLETE flag won't be cleared in the reuse path
	 *   because we put a barrier between setting rq->deadline
	 *   and clearing the flag in blk_mq_start_request(), so
	 *   this rq won't be timed out either.
	 */
	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}
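
/*
 * Sketch of a driver ->timeout() callback (illustrative only,
 * hypothetical driver): blk_mq_rq_timed_out() above acts on the value
 * returned here. Returning BLK_EH_RESET_TIMER re-arms the timer;
 * returning BLK_EH_HANDLED tells blk-mq the driver has already dealt
 * with the command and the request should be completed.
 */
static enum blk_eh_timer_return __maybe_unused
example_timeout(struct request *rq, bool reserved)
{
	/* e.g. abort the command in hardware here, or decide to keep waiting */
	return BLK_EH_RESET_TIMER;
}
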
static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
				    struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, keeping the tagged requests in their original
 * order and the untagged requests in their original order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
	struct request *rq, *tmp, *first = NULL;

	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
		if (rq == first)
			break;
		if (rq->tag != -1) {
			list_move(&rq->queuelist, list);
			if (!first)
				first = rq;
		}
	}

	return first != NULL;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
				void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->entry);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws;

	/*
	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
	 * The thread which wins the race to grab this bit adds the hardware
	 * queue to the wait queue.
	 */
	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
		return false;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

	/*
	 * As soon as this returns, it's no longer safe to fiddle with
	 * hctx->dispatch_wait, since a completion can wake up the wait queue
	 * and unlock the bit.
	 */
	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
	return true;
}

bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	int errors, queued;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;
		blk_status_t ret;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			if (!queued && reorder_tags_to_front(list))
				continue;

			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed.
			 */
			if (!blk_mq_dispatch_wait_add(hctx))
				break;

			/*
			 * It's possible that a tag was freed in the window
			 * between the allocation failure and adding the
			 * hardware queue to the wait queue.
			 */
			if (!blk_mq_get_driver_tag(rq, &hctx, false))
				break;
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			struct request *nxt;

			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE) {
			blk_mq_put_driver_tag_hctx(hctx, rq);
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		/*
		 * If an I/O scheduler has been configured and we got a driver
		 * tag for the next request already, free it again.
		 */
		rq = list_first_entry(list, struct request, queuelist);
		blk_mq_put_driver_tag(rq);

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If TAG_WAITING is set that means that an I/O scheduler has
		 * been configured and another thread is waiting for a driver
		 * tag. To guarantee fairness, do not rerun this hardware queue
		 * but let the other thread grab the driver tag.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 */
		if (!blk_mq_sched_needs_restart(hctx) &&
		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
	}

	return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 */
	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	/*
	 * We can't run the queue inline with interrupts disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		might_sleep();

		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
	}
}
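
/*
 * Note on the dispatch paths above (illustrative sketch): a driver whose
 * ->queue_rq() may sleep must set BLK_MQ_F_BLOCKING in its tag set, which
 * makes blk-mq dispatch under SRCU instead of plain RCU. The helper below
 * is hypothetical.
 */
static inline void example_mark_queue_rq_blocking(struct blk_mq_tag_set *set)
{
	set->flags |= BLK_MQ_F_BLOCKING;	/* ->queue_rq() is allowed to sleep */
}
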
/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					 &hctx->run_work,
					 msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!blk_mq_hctx_has_pending(hctx) ||
		    blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources available or some other condition isn't
 * satisfied; BLK_STS_RESOURCE is usually returned in that case.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
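
/*
 * Usage sketch (illustrative only, hypothetical helpers): a driver that
 * runs out of device resources in ->queue_rq() can stop the hardware
 * queue and restart it from its completion path once resources are
 * available again. As noted above, stopping does not drain dispatch;
 * blk_mq_quiesce_queue() is needed for that guarantee.
 */
static inline blk_status_t example_handle_ring_full(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_stop_hw_queue(hctx);	/* pause further ->queue_rq() invocations */
	return BLK_STS_RESOURCE;	/* blk-mq keeps the request for a later retry */
}

static inline void example_ring_space_freed(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);	/* restart asynchronously */
}
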
/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources available or some other condition isn't
 * satisfied; BLK_STS_RESOURCE is usually returned in that case.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue. The exception is if
	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
	 * the STOPPED bit and run it.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
			return;

		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	}

	__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	/*
	 * Stop the hw queue, then modify currently delayed work.
	 * This should prevent us from running the queue prematurely.
	 * Mark the queue as auto-clearing STOPPED when it runs.
	 */
	blk_mq_stop_hw_queue(hctx);
	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					&hctx->run_work,
					msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
{
	/*
	 * Preemption doesn't flush the plug list, so it's possible that
	 * ctx->cpu is offline now.
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
								&ctx_list,
								from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
						from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_account_io_start(rq, true);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_ctx *ctx,
				   struct request *rq)
{
	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, false);
	spin_unlock(&ctx->lock);
}

static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
					struct request *rq,
					blk_qc_t *cookie, bool may_sleep)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;
	bool run_queue = true;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		goto insert;
	}

	if (q->elevator)
		goto insert;

	if (!blk_mq_get_driver_tag(rq, NULL, false))
		goto insert;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For an OK return we are done; for an error, kill the request. For
	 * any other return (busy), just add it to our list as we previously
	 * would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		*cookie = new_cookie;
		return;
	case BLK_STS_RESOURCE:
		__blk_mq_requeue_request(rq);
		goto insert;
	default:
		*cookie = BLK_QC_T_NONE;
		blk_mq_end_request(rq, ret);
		return;
	}

insert:
	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
}

static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, blk_qc_t *cookie)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
		rcu_read_unlock();
	} else {
		unsigned int srcu_idx;

		might_sleep();

		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
	}
}

static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_mq_alloc_data data = { .flags = 0 };
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;
	unsigned int wb_acct;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	if (blk_mq_sched_bio_merge(q, bio))
		return BLK_QC_T_NONE;

	wb_acct = wbt_wait(q->rq_wb, bio, NULL);

	trace_block_getrq(q, bio, bio->bi_opf);

	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
	if (unlikely(!rq)) {
		__wbt_done(q->rq_wb, wb_acct);
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;
	}

	wbt_track(&rq->issue_stat, wb_acct);

	cookie = request_to_qc_t(data.hctx, rq);

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		if (q->elevator) {
			blk_mq_sched_insert_request(rq, false, true, true,
					true);
		} else {
			blk_insert_flush(rq);
			blk_mq_run_hw_queue(data.hctx, true);
		}
	} else if (plug && q->nr_hw_queues == 1) {
		struct request *last = NULL;

		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/*
		 * @request_count may become stale because of schedule
		 * out, so check the list again.
		 */
		if (list_empty(&plug->mq_list))
			request_count = 0;
		else if (blk_queue_nomerges(q))
			request_count = blk_plug_queued_count(q);

		if (!request_count)
			trace_block_plug(q);
		else
			last = list_entry_rq(plug->mq_list.prev);

		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
	} else if (plug && !blk_queue_nomerges(q)) {
		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued, so the plug list holds at most one request.
		 * The plug list might get flushed before this; if that
		 * happens, the plug list is empty and same_queue_rq is
		 * invalid.
		 */
		if (list_empty(&plug->mq_list))
			same_queue_rq = NULL;
		if (same_queue_rq)
			list_del_init(&same_queue_rq->queuelist);
		list_add_tail(&rq->queuelist, &plug->mq_list);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = blk_mq_map_queue(q,
					same_queue_rq->mq_ctx->cpu);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
					&cookie);
		}
	} else if (q->nr_hw_queues > 1 && is_sync) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else if (q->elevator) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true, true);
	} else {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_queue_io(data.hctx, data.ctx, rq);
		blk_mq_run_hw_queue(data.hctx, true);
	}

	return cookie;
}

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (set->ops->init_request) {
				if (set->ops->init_request(set, rq, hctx_idx,
						node)) {
					tags->static_rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}
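
/*
 * Sketch of per-request driver data (illustrative only, hypothetical
 * names): blk_mq_alloc_rqs() above sizes every request as
 * sizeof(struct request) + set->cmd_size, so a driver can reserve
 * per-command state and reach it through blk_mq_rq_to_pdu().
 */
struct example_cmd {
	int status;			/* hypothetical per-command field */
};

static int __maybe_unused example_init_request(struct blk_mq_tag_set *set,
					       struct request *rq,
					       unsigned int hctx_idx,
					       unsigned int numa_node)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = 0;		/* storage comes from the cmd_size tail of rq */
	return 0;
}
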
/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->queue_rq_srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
		goto exit_hctx;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto sched_exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
				   node))
		goto free_fq;

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		init_srcu_struct(hctx->queue_rq_srcu);

	blk_mq_debugfs_register_hctx(q, hctx);

	return 0;

 free_fq:
	kfree(hctx->fq);
 sched_exit_hctx:
	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/* If the cpu isn't present, the cpu is mapped to the first hctx */
		if (!cpu_present(i))
			continue;

		hctx = blk_mq_map_queue(q, i);

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
{
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
				set->queue_depth);
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	set->tags[hctx_idx] = NULL;
	return false;
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	if (set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
	}
}

static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i, hctx_idx;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues.
	 *
	 * If the cpu isn't present, the cpu is mapped to the first hctx.
	 */
	for_each_present_cpu(i) {
		hctx_idx = q->mq_map[i];
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[hctx_idx] &&
		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i, hctx_idx;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues.
	 *
	 * If the cpu isn't present, the cpu is mapped to the first hctx.
	 */
	for_each_present_cpu(i) {
		hctx_idx = q->mq_map[i];
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[hctx_idx] &&
		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
			/*
			 * If tags initialization fails for some hctx,
			 * that hctx won't be brought online.  In this
			 * case, remap the current ctx to hctx[0] which
			 * is guaranteed to always have tags allocated
			 */
			q->mq_map[i] = 0;
		}

		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/* Never unmap queue 0.  We need it as a
			 * fallback in case a new remap fails
			 * allocation
			 */
			if (i && set->tags[i])
				blk_mq_free_map_and_requests(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_inc(&q->shared_hctx_restart);
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		} else {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_dec(&q->shared_hctx_restart);
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
					bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);
	INIT_LIST_HEAD(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);

	synchronize_rcu();
}
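
/*
 * Once a tag set serves more than one request queue, BLK_MQ_F_TAG_SHARED is
 * set on the tag set and on every hctx of every queue that uses it, so tag
 * allocation can take the number of active users into account and one queue
 * cannot starve the others of tags.
 */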
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/*
 * This is the actual release handler for mq, but we do it from the
 * request queue's release handler to avoid use-after-free and
 * headaches: q->mq_kobj shouldn't have been introduced, but we can't
 * group the ctx/kctx kobjects without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kobject_put(&hctx->kobj);
	}

	q->mq_map = NULL;

	kfree(q->queue_hw_ctx);

	/*
	 * release .mq_kobj and sw queue's kobject now because
	 * both share lifetime with request queue.
	 */
	blk_mq_sysfs_deinit(q);

	free_percpu(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);

static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
					     node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags)
				blk_mq_free_map_and_requests(set, j);
			blk_mq_exit_hctx(q, set, hctx, j);
			kobject_put(&hctx->kobj);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	blk_mq_sysfs_register(q);
}
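
/*
 * Initialize an mq request queue that the caller has already allocated
 * (e.g. with blk_alloc_queue_node()) for use with the given tag set.  On
 * failure an ERR_PTR() is returned and the passed-in queue is left for the
 * caller to clean up.
 */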
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
						GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = -1;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = blk_mq_sched_init(q);
		if (ret)
			return ERR_PTR(ret);
	}

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_debugfs_unregister_hctxs(q);
	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change the hctx numa_node according to the new topology
	 * (this involves freeing and re-allocating memory, is it worth doing?)
	 */

	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
	blk_mq_debugfs_register_hctxs(q);
}
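
/*
 * Allocate a tag map plus requests for every hardware queue in the set,
 * unwinding the maps allocated so far if one of them fails.
 */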
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		if (!__blk_mq_alloc_rq_map(set, i))
			goto out_unwind;

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set->tags[i]);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	if (set->ops->map_queues)
		return set->ops->map_queues(set);
	else
		return blk_mq_map_queues(set);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the adjusted
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
			GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		blk_mq_free_map_and_requests(set, i);

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	blk_mq_freeze_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
							min(nr, set->queue_depth),
							false);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
							nr, true);
		}
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	blk_mq_unfreeze_queue(q);

	return ret;
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_queue_reinit(q);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
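
/*
 * Typical driver usage of the tag set / queue APIs above (an illustrative
 * sketch only; "my_dev", "my_mq_ops" and "struct my_cmd" are hypothetical
 * and not part of this file):
 *
 *	memset(&my_dev->tag_set, 0, sizeof(my_dev->tag_set));
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(q);
 *	}
 *
 * Teardown is the reverse: blk_cleanup_queue() on the queue, then
 * blk_mq_free_tag_set() on the tag set.
 */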

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
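
/*
 * Hybrid polling: before busy-polling for completion, sleep for roughly the
 * expected completion time of this request (q->poll_nsec if set, otherwise
 * half the observed mean from blk_mq_poll_nsecs() above).  Returns true if
 * hybrid sleeping was used, in which case the caller restarts its poll loop;
 * false if it is disabled or no estimate is available.
 */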
static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
		return false;

	/*
	 * poll_nsec can be:
	 *
	 * -1:	don't ever hybrid sleep
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec == -1)
		return false;
	else if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);

	if (!nsecs)
		return false;

	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	return false;
}

bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_plug *plug;
	struct request *rq;

	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	plug = current->plug;
	if (plug)
		blk_flush_plug_list(plug, false);

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_poll);

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);