/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
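
/*
 * Worked example of the bucketing above (illustrative sizes): a 512-byte
 * request has ilog2(512) == 9, so it lands in bucket 0 (read) or 1 (write);
 * a 4096-byte request has ilog2(4096) == 12, giving bucket
 * ddir + 2 * (12 - 9), i.e. 6 for a read and 7 for a write. Requests large
 * enough to overflow BLK_MQ_POLL_STATS_BKTS are clamped into the last
 * read/write bucket pair.
 */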

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	/*
	 * index[0] counts the specific partition that was asked for. index[1]
	 * counts the ones that are active on the whole device, so increment
	 * that if mi->part is indeed a partition, and not a whole device.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;
	if (mi->part->partno)
		mi->inflight[1]++;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	if (!q->mq_ops)
		blk_drain_queue(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
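
/*
 * Typical freeze/unfreeze usage (a sketch, not taken from a real caller):
 * freezing drains all in-flight requests and blocks new ones, so queue-wide
 * data structures can be changed safely in between.
 *
 *	blk_mq_freeze_queue(q);
 *	... no requests are in flight here; update queue state ...
 *	blk_mq_unfreeze_queue(q);
 */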

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback function from being invoked. Once this function has returned, we
 * make sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
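
/*
 * Quiesce/unquiesce usage sketch (illustrative only): unlike freezing,
 * quiescing does not wait for requests to complete; it only guarantees
 * that no new ->queue_rq() calls will be started while quiesced.
 *
 *	blk_mq_quiesce_queue(q);
 *	... no dispatch to the driver can happen here ...
 *	blk_mq_unquiesce_queue(q);
 */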

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];
	req_flags_t rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->rq_flags = rq_flags;
	rq->cpu = -1;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time_ns = ktime_get_ns();
	rq->io_start_time_ns = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;
	rq->__deadline = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
#endif

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	refcount_set(&rq->ref, 1);
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
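
/*
 * Allocation usage sketch (illustrative): callers must check for an
 * ERR_PTR() return, not NULL, and balance the allocation with
 * blk_mq_free_request().
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and issue the passthrough command ...
 *	blk_mq_free_request(rq);
 */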

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not, tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	wbt_done(q->rq_wb, rq);

	if (blk_rq_rl(rq))
		blk_put_rl(blk_rq_rl(rq));

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	u64 now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_account_io_done(rq, now);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, rq);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
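
/*
 * Completion sketch (illustrative; struct my_cmd and its fields are made
 * up): a driver that embeds its per-command data after the request can map
 * back to the request and end it with an appropriate status.
 *
 *	static void my_complete_cmd(struct my_cmd *cmd)
 *	{
 *		struct request *rq = blk_mq_rq_from_pdu(cmd);
 *
 *		blk_mq_end_request(rq, cmd->error ? BLK_STS_IOERR : BLK_STS_OK);
 *	}
 */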

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!blk_mq_mark_complete(rq))
		return;
	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return;
	__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		rq->throtl_size = blk_rq_sectors(rq);
#endif
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);
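
/*
 * Minimal ->queue_rq() sketch (illustrative; my_submit_to_hw() is a made-up
 * helper): a driver marks the request started before handing it to hardware,
 * and reports backpressure with a resource status instead of an error.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!my_submit_to_hw(rq))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */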

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
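
/*
 * Requeue usage sketch (illustrative): a driver that hits a transient
 * failure can push the request back and have the requeue list kicked
 * after a delay, rather than failing the I/O.
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(rq->q, 100);
 */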

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = blk_rq_deadline(rq);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * Just do a quick check if it is expired before locking the request in
	 * so we're not unnecessarily synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return;

	/*
	 * We have reason to believe the request may be expired. Take a
	 * reference on the request to lock this request lifetime into its
	 * currently allocated context to prevent it from being reallocated in
	 * the event the completion by-passes this timeout handler.
	 *
	 * If the reference was already released, then the driver beat the
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return;

	/*
	 * The request is now locked and cannot be reallocated underneath the
	 * timeout handler's processing. Re-verify this exact request is truly
	 * expired; if it is not expired, then the request was completed and
	 * reallocated as a new request.
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
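
/*
 * Timeout handler sketch (illustrative; my_device_still_busy() is a made-up
 * helper): a driver either asks for more time with BLK_EH_RESET_TIMER, or
 * claims the request, finishes it itself, and returns BLK_EH_DONE.
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (my_device_still_busy(rq))
 *			return BLK_EH_RESET_TIMER;
 *
 *		blk_mq_end_request(rq, BLK_STS_TIMEOUT);
 *		return BLK_EH_DONE;
 *	}
 */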

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_list))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
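
/*
 * Worked example for the index above (illustrative): queued == 1 maps to
 * index min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(1) + 1) == 1, queued == 5
 * gives ilog2(5) + 1 == 3, and very large batch sizes are clamped to
 * BLK_MQ_MAX_DISPATCH_ORDER - 1, so hctx->dispatched[] buckets dispatch
 * batch sizes by powers of two.
 */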

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
				 struct request *rq)
{
	struct blk_mq_hw_ctx *this_hctx = *hctx;
	struct sbq_wait_state *ws;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq, hctx, false);
	}

	wait = &this_hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	spin_lock(&this_hctx->lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&this_hctx->lock);
		return false;
	}

	ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
	add_wait_queue(&ws->wait, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq, hctx, false);
	if (!ret) {
		spin_unlock(&this_hctx->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	spin_lock_irq(&ws->wait.lock);
	list_del_init(&wait->entry);
	spin_unlock_irq(&ws->wait.lock);
	spin_unlock(&this_hctx->lock);

	return true;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
			break;

		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
				blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		return false;
	}

	/*
	 * If the host/device is unable to accept more work, inform the
	 * caller of that.
	 */
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		return false;

	return (queued + errors) != 0;
}
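
/*
 * Driver-side sketch of the two resource statuses handled above
 * (illustrative; my_ring_full() and my_dma_map() are made-up helpers):
 * BLK_STS_DEV_RESOURCE tells blk-mq that the driver will rerun the queue
 * itself (e.g. from its completion path), while BLK_STS_RESOURCE asks
 * blk-mq to rerun the queue, possibly after BLK_MQ_RESOURCE_DELAY.
 *
 *	if (my_ring_full(hctx))
 *		return BLK_STS_DEV_RESOURCE;
 *	if (my_dma_map(rq) < 0)
 *		return BLK_STS_RESOURCE;
 */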

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but later it becomes online, then this warning is harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline, then the warning can't be
	 *   triggered, and we depend on the blk-mq timeout handler to
	 *   handle requests dispatched to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx, which should only happen in the path of handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select the CPU next time once CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching io schedulers,
	 * updating nr_hw_queues, or doing other things, and we can't run the
	 * queue any more; even __blk_mq_hctx_has_pending() can't be called
	 * safely.
	 *
	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run) {
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * Drivers often use this function to pause .queue_rq() when there aren't
 * enough resources or some condition isn't satisfied; in that case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * Drivers often use this function to pause .queue_rq() when there aren't
 * enough resources or some condition isn't satisfied; in that case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
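
/*
 * Stop/restart usage sketch (illustrative): a driver whose hardware ring is
 * full can stop the queue in ->queue_rq() and restart it from its completion
 * interrupt once space is available again.
 *
 *	In ->queue_rq():
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_DEV_RESOURCE;
 *
 *	In the completion handler:
 *		blk_mq_start_stopped_hw_queues(q, true);
 */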

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
		return;

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/*
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	spin_lock(&hctx->lock);
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)

{
	/*
	 * preemption doesn't flush the plug list, so it's possible that
	 * ctx->cpu is offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
								&ctx_list,
								from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
					     from_schedule);
	}
}
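
/*
 * Plugging usage sketch (illustrative): a submitter batches bios in an
 * on-stack plug; blk_finish_plug() hands the batch to
 * blk_mq_flush_plug_list(), which sorts and inserts per-ctx runs.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);
 *	blk_finish_plug(&plug);
 */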

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));

	blk_account_io_start(rq, true);
}

static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    blk_qc_t *cookie)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For OK queue, we are done. For error, caller may kill it.
	 * Any other error (busy), just add it to our list as we
	 * previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		*cookie = new_cookie;
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		__blk_mq_requeue_request(rq);
		break;
	default:
		*cookie = BLK_QC_T_NONE;
		break;
	}

	return ret;
}

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass_insert)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;

	/*
	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
	 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to the
	 * caller, so that the driver does not try to dispatch again.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		bypass_insert = false;
		goto insert;
	}

	if (q->elevator && !bypass_insert)
		goto insert;

	if (!blk_mq_get_dispatch_budget(hctx))
		goto insert;

	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
		blk_mq_put_dispatch_budget(hctx);
		goto insert;
	}

	return __blk_mq_issue_directly(hctx, rq, cookie);
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	blk_mq_sched_insert_request(rq, false, run_queue, false);
	return BLK_STS_OK;
}

static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq, blk_qc_t *cookie)
{
	blk_status_t ret;
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);

	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		blk_mq_sched_insert_request(rq, false, true, false);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);

	hctx_unlock(hctx, srcu_idx);
}

blk_status_t blk_mq_request_issue_directly(struct request *rq)
{
	blk_status_t ret;
	int srcu_idx;
	blk_qc_t unused_cookie;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	hctx_lock(hctx, &srcu_idx);
	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
	hctx_unlock(hctx, srcu_idx);

	return ret;
}

static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_mq_alloc_data data = { .flags = 0 };
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;
	unsigned int wb_acct;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	if (blk_mq_sched_bio_merge(q, bio))
		return BLK_QC_T_NONE;

	wb_acct = wbt_wait(q->rq_wb, bio, NULL);

	trace_block_getrq(q, bio, bio->bi_opf);

	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
	if (unlikely(!rq)) {
		__wbt_done(q->rq_wb, wb_acct);
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;
	}

	wbt_track(rq, wb_acct);

	cookie = request_to_qc_t(data.hctx, rq);

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/* bypass scheduler for flush rq */
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(data.hctx, true);
	} else if (plug && q->nr_hw_queues == 1) {
		struct request *last = NULL;

		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/*
		 * @request_count may become stale because of being scheduled
		 * out, so check the list again.
		 */
		if (list_empty(&plug->mq_list))
			request_count = 0;
		else if (blk_queue_nomerges(q))
			request_count = blk_plug_queued_count(q);

		if (!request_count)
			trace_block_plug(q);
		else
			last = list_entry_rq(plug->mq_list.prev);

		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
	} else if (plug && !blk_queue_nomerges(q)) {
		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued, so the plug list will have one request at most.
		 * The plug list might get flushed before this. If that
		 * happens, the plug list is empty, and same_queue_rq is
		 * invalid.
		 */
		if (list_empty(&plug->mq_list))
			same_queue_rq = NULL;
		if (same_queue_rq)
			list_del_init(&same_queue_rq->queuelist);
		list_add_tail(&rq->queuelist, &plug->mq_list);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = blk_mq_map_queue(q,
					same_queue_rq->mq_ctx->cpu);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
					&cookie);
		}
	} else if (q->nr_hw_queues > 1 && is_sync) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true);
	}

	return cookie;
}

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	return 0;
}
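
/*
 * Sizing arithmetic used by blk_mq_alloc_rqs() below, with made-up numbers
 * for illustration: with 4 KiB pages, order_to_size(4) == 64 KiB. If
 * sizeof(struct request) plus set->cmd_size rounds up to, say, 512 bytes
 * per cache-line-aligned request, one order-4 allocation then holds
 * 65536 / 512 == 128 requests.
 */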

int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}
/*
 * 'cpu' is going away. Splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
		goto exit_hctx;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto sched_exit_hctx;

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
		goto free_fq;

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		init_srcu_struct(hctx->srcu);

	blk_mq_debugfs_register_hctx(q, hctx);

	return 0;

free_fq:
	kfree(hctx->fq);
sched_exit_hctx:
	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
	sbitmap_free(&hctx->ctx_map);
free_ctxs:
	kfree(hctx->ctxs);
unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		hctx = blk_mq_map_queue(q, i);
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
{
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
				set->queue_depth);
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	set->tags[hctx_idx] = NULL;
	return false;
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	if (set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
	}
}

static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i, hctx_idx;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
		hctx->dispatch_from = NULL;
	}

	/*
	 * Map software to hardware queues.
	 *
	 * If the cpu isn't present, the cpu is mapped to the first hctx.
	 */
	for_each_possible_cpu(i) {
		hctx_idx = q->mq_map[i];
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[hctx_idx] &&
		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
			/*
			 * If tags initialization fails for some hctx,
			 * that hctx won't be brought online. In this
			 * case, remap the current ctx to hctx[0] which
			 * is guaranteed to always have tags allocated
			 */
			q->mq_map[i] = 0;
		}

		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/*
			 * Never unmap queue 0. We need it as a
			 * fallback in case a new remap fails
			 * allocation
			 */
			if (i && set->tags[i])
				blk_mq_free_map_and_requests(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch round-robin counts
		 */
		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}
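/*
 * Worked example of the mapping above (illustrative, not from the code):
 * with 4 possible CPUs and mq_map = { 0, 0, 1, 1 }, hctx0 ends up with
 * ctx0 and ctx1 (index_hw 0 and 1) and hctx1 with ctx2 and ctx3; each
 * hctx->cpumask covers its two CPUs and each ctx_map is resized to two
 * bits by the sbitmap_resize() call above.
 */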
/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_inc(&q->shared_hctx_restart);
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		} else {
			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
				atomic_dec(&q->shared_hctx_restart);
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
					bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
	synchronize_rcu();
	INIT_LIST_HEAD(&q->tag_set_list);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
	if (!list_empty(&set->tag_list) &&
	    !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/*
 * This is the actual release handler for mq, but we do it from the
 * request queue's release handler to avoid use-after-free and other
 * headaches: q->mq_kobj shouldn't have been introduced, but we can't
 * group the ctx/kctx kobjects without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kobject_put(&hctx->kobj);
	}

	q->mq_map = NULL;

	kfree(q->queue_hw_ctx);

	/*
	 * Release .mq_kobj and the sw queues' kobjects now, because
	 * both share their lifetime with the request queue.
	 */
	blk_mq_sysfs_deinit(q);

	free_percpu(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);
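/*
 * Illustrative driver-side sketch of the allocation path above. This is
 * not part of blk-mq: my_queue_rq(), my_mq_ops and my_create_queue() are
 * hypothetical names, and a real driver would hand bd->rq to hardware
 * instead of completing it inline.
 */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	/*
	 * Submit bd->rq to the device here; completion is immediate only
	 * because this sketch has no hardware behind it.
	 */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
};

static struct request_queue *my_create_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	memset(set, 0, sizeof(*set));
	set->ops = &my_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = 0;	/* extra per-request payload, if any */
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	if (blk_mq_alloc_tag_set(set))
		return NULL;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return NULL;
	}
	return q;
}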
static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
				   struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
					     node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags)
				blk_mq_free_map_and_requests(set, j);
			blk_mq_exit_hctx(q, set, hctx, j);
			kobject_put(&hctx->kobj);
			hctxs[j] = NULL;
		}
	}
	q->nr_hw_queues = i;
	mutex_unlock(&q->sysfs_lock);
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
				       GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);
	if (q->mq_ops->poll)
		q->poll_fn = blk_mq_poll;

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = -1;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = elevator_init_mq(q);
		if (ret)
			return ERR_PTR(ret);
	}

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	blk_mq_del_queue_tag_set(q);
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_debugfs_unregister_hctxs(q);
	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory, worth doing?)
	 */
	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
	blk_mq_debugfs_register_hctxs(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		if (!__blk_mq_alloc_rq_map(set, i))
			goto out_unwind;

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set->tags[i]);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
			depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	if (set->ops->map_queues) {
		int cpu;
		/*
		 * A transport's .map_queues callback usually works in the
		 * following way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 *	mask = get_cpu_mask(queue)
		 *	for_each_cpu(cpu, mask)
		 *		set->mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared first
		 * to kill stale mappings, since a CPU may not end up mapped
		 * to any hw queue.
		 */
		for_each_possible_cpu(cpu)
			set->mq_map[cpu] = 0;

		return set->ops->map_queues(set);
	} else
		return blk_mq_map_queues(set);
}
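/*
 * Minimal sketch of a driver .map_queues callback implementing the
 * pattern described in the comment above. Purely illustrative:
 * my_map_queues() is a hypothetical name, and the modulo spread stands
 * in for whatever affinity information a real transport would use.
 */
static int my_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int cpu;

	/* Spread the possible CPUs evenly across the hardware queues. */
	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = cpu % set->nr_hw_queues;

	return 0;
}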
/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
				   GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		blk_mq_free_map_and_requests(set, i);

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		}
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
					 int nr_hw_queues)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_queue_reinit(q);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
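/*
 * Illustrative only: how a driver might react to a controller event that
 * changed the number of usable hardware queues. struct my_dev and
 * my_update_queue_count() are hypothetical; the update freezes every
 * queue sharing the tag set, remaps, and unfreezes, as seen above.
 */
struct my_dev {
	struct blk_mq_tag_set tag_set;
};

static void my_update_queue_count(struct my_dev *dev, int count)
{
	blk_mq_update_nr_hw_queues(&dev->tag_set, count);
}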
 */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
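/*
 * Worked example (illustrative): if the stats bucket for this request
 * reports a mean completion time of 8000ns, the hybrid poll path below
 * sleeps for (8000 + 1) / 2 = 4000ns before it starts spinning.
 */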
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	__set_current_state(TASK_RUNNING);
	return false;
}

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);
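/*
 * Illustrative sketch of the driver half of the polling path above: a
 * hypothetical my_poll() callback wired into blk_mq_ops. blk-mq assigns
 * q->poll_fn only when .poll is set (see blk_mq_init_allocated_queue()).
 * my_poll() is an invented name; a real driver would scan its completion
 * ring here rather than return a placeholder count.
 */
static int my_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	int found = 0;

	/*
	 * A real driver would reap finished commands here, completing
	 * each one and counting it. Returning > 0 stops the spin loop
	 * in __blk_mq_poll(); a negative return aborts polling.
	 */
	return found;
}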