1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Block multiqueue core code 4 * 5 * Copyright (C) 2013-2014 Jens Axboe 6 * Copyright (C) 2013-2014 Christoph Hellwig 7 */ 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/backing-dev.h> 11 #include <linux/bio.h> 12 #include <linux/blkdev.h> 13 #include <linux/blk-integrity.h> 14 #include <linux/kmemleak.h> 15 #include <linux/mm.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> 18 #include <linux/workqueue.h> 19 #include <linux/smp.h> 20 #include <linux/interrupt.h> 21 #include <linux/llist.h> 22 #include <linux/cpu.h> 23 #include <linux/cache.h> 24 #include <linux/sched/sysctl.h> 25 #include <linux/sched/topology.h> 26 #include <linux/sched/signal.h> 27 #include <linux/delay.h> 28 #include <linux/crash_dump.h> 29 #include <linux/prefetch.h> 30 #include <linux/blk-crypto.h> 31 32 #include <trace/events/block.h> 33 34 #include <linux/blk-mq.h> 35 #include <linux/t10-pi.h> 36 #include "blk.h" 37 #include "blk-mq.h" 38 #include "blk-mq-debugfs.h" 39 #include "blk-mq-tag.h" 40 #include "blk-pm.h" 41 #include "blk-stat.h" 42 #include "blk-mq-sched.h" 43 #include "blk-rq-qos.h" 44 45 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done); 46 47 static void blk_mq_poll_stats_start(struct request_queue *q); 48 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); 49 50 static int blk_mq_poll_stats_bkt(const struct request *rq) 51 { 52 int ddir, sectors, bucket; 53 54 ddir = rq_data_dir(rq); 55 sectors = blk_rq_stats_sectors(rq); 56 57 bucket = ddir + 2 * ilog2(sectors); 58 59 if (bucket < 0) 60 return -1; 61 else if (bucket >= BLK_MQ_POLL_STATS_BKTS) 62 return ddir + BLK_MQ_POLL_STATS_BKTS - 2; 63 64 return bucket; 65 } 66 67 #define BLK_QC_T_SHIFT 16 68 #define BLK_QC_T_INTERNAL (1U << 31) 69 70 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, 71 blk_qc_t qc) 72 { 73 return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT]; 74 } 75 76 static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, 77 blk_qc_t qc) 78 { 79 unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1); 80 81 if (qc & BLK_QC_T_INTERNAL) 82 return blk_mq_tag_to_rq(hctx->sched_tags, tag); 83 return blk_mq_tag_to_rq(hctx->tags, tag); 84 } 85 86 static inline blk_qc_t blk_rq_to_qc(struct request *rq) 87 { 88 return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) | 89 (rq->tag != -1 ? 90 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL)); 91 } 92 93 /* 94 * Check if any of the ctx, dispatch list or elevator 95 * have pending work in this hardware queue. 
96 */ 97 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 98 { 99 return !list_empty_careful(&hctx->dispatch) || 100 sbitmap_any_bit_set(&hctx->ctx_map) || 101 blk_mq_sched_has_work(hctx); 102 } 103 104 /* 105 * Mark this ctx as having pending work in this hardware queue 106 */ 107 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 108 struct blk_mq_ctx *ctx) 109 { 110 const int bit = ctx->index_hw[hctx->type]; 111 112 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) 113 sbitmap_set_bit(&hctx->ctx_map, bit); 114 } 115 116 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, 117 struct blk_mq_ctx *ctx) 118 { 119 const int bit = ctx->index_hw[hctx->type]; 120 121 sbitmap_clear_bit(&hctx->ctx_map, bit); 122 } 123 124 struct mq_inflight { 125 struct block_device *part; 126 unsigned int inflight[2]; 127 }; 128 129 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, 130 struct request *rq, void *priv, 131 bool reserved) 132 { 133 struct mq_inflight *mi = priv; 134 135 if ((!mi->part->bd_partno || rq->part == mi->part) && 136 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) 137 mi->inflight[rq_data_dir(rq)]++; 138 139 return true; 140 } 141 142 unsigned int blk_mq_in_flight(struct request_queue *q, 143 struct block_device *part) 144 { 145 struct mq_inflight mi = { .part = part }; 146 147 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 148 149 return mi.inflight[0] + mi.inflight[1]; 150 } 151 152 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, 153 unsigned int inflight[2]) 154 { 155 struct mq_inflight mi = { .part = part }; 156 157 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 158 inflight[0] = mi.inflight[0]; 159 inflight[1] = mi.inflight[1]; 160 } 161 162 void blk_freeze_queue_start(struct request_queue *q) 163 { 164 mutex_lock(&q->mq_freeze_lock); 165 if (++q->mq_freeze_depth == 1) { 166 percpu_ref_kill(&q->q_usage_counter); 167 mutex_unlock(&q->mq_freeze_lock); 168 if (queue_is_mq(q)) 169 blk_mq_run_hw_queues(q, false); 170 } else { 171 mutex_unlock(&q->mq_freeze_lock); 172 } 173 } 174 EXPORT_SYMBOL_GPL(blk_freeze_queue_start); 175 176 void blk_mq_freeze_queue_wait(struct request_queue *q) 177 { 178 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); 179 } 180 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); 181 182 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, 183 unsigned long timeout) 184 { 185 return wait_event_timeout(q->mq_freeze_wq, 186 percpu_ref_is_zero(&q->q_usage_counter), 187 timeout); 188 } 189 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); 190 191 /* 192 * Guarantee no request is in use, so we can change any data structure of 193 * the queue afterward. 194 */ 195 void blk_freeze_queue(struct request_queue *q) 196 { 197 /* 198 * In the !blk_mq case we are only calling this to kill the 199 * q_usage_counter, otherwise this increases the freeze depth 200 * and waits for it to return to zero. For this reason there is 201 * no blk_unfreeze_queue(), and blk_freeze_queue() is not 202 * exported to drivers as the only user for unfreeze is blk_mq. 
203 */ 204 blk_freeze_queue_start(q); 205 blk_mq_freeze_queue_wait(q); 206 } 207 208 void blk_mq_freeze_queue(struct request_queue *q) 209 { 210 /* 211 * ...just an alias to keep freeze and unfreeze actions balanced 212 * in the blk_mq_* namespace 213 */ 214 blk_freeze_queue(q); 215 } 216 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); 217 218 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic) 219 { 220 mutex_lock(&q->mq_freeze_lock); 221 if (force_atomic) 222 q->q_usage_counter.data->force_atomic = true; 223 q->mq_freeze_depth--; 224 WARN_ON_ONCE(q->mq_freeze_depth < 0); 225 if (!q->mq_freeze_depth) { 226 percpu_ref_resurrect(&q->q_usage_counter); 227 wake_up_all(&q->mq_freeze_wq); 228 } 229 mutex_unlock(&q->mq_freeze_lock); 230 } 231 232 void blk_mq_unfreeze_queue(struct request_queue *q) 233 { 234 __blk_mq_unfreeze_queue(q, false); 235 } 236 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 237 238 /* 239 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the 240 * mpt3sas driver such that this function can be removed. 241 */ 242 void blk_mq_quiesce_queue_nowait(struct request_queue *q) 243 { 244 unsigned long flags; 245 246 spin_lock_irqsave(&q->queue_lock, flags); 247 if (!q->quiesce_depth++) 248 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); 249 spin_unlock_irqrestore(&q->queue_lock, flags); 250 } 251 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); 252 253 /** 254 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done 255 * @q: request queue. 256 * 257 * Note: it is driver's responsibility for making sure that quiesce has 258 * been started. 259 */ 260 void blk_mq_wait_quiesce_done(struct request_queue *q) 261 { 262 struct blk_mq_hw_ctx *hctx; 263 unsigned int i; 264 bool rcu = false; 265 266 queue_for_each_hw_ctx(q, hctx, i) { 267 if (hctx->flags & BLK_MQ_F_BLOCKING) 268 synchronize_srcu(hctx->srcu); 269 else 270 rcu = true; 271 } 272 if (rcu) 273 synchronize_rcu(); 274 } 275 EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done); 276 277 /** 278 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished 279 * @q: request queue. 280 * 281 * Note: this function does not prevent that the struct request end_io() 282 * callback function is invoked. Once this function is returned, we make 283 * sure no dispatch can happen until the queue is unquiesced via 284 * blk_mq_unquiesce_queue(). 285 */ 286 void blk_mq_quiesce_queue(struct request_queue *q) 287 { 288 blk_mq_quiesce_queue_nowait(q); 289 blk_mq_wait_quiesce_done(q); 290 } 291 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); 292 293 /* 294 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue() 295 * @q: request queue. 296 * 297 * This function recovers queue into the state before quiescing 298 * which is done by blk_mq_quiesce_queue. 
299 */ 300 void blk_mq_unquiesce_queue(struct request_queue *q) 301 { 302 unsigned long flags; 303 bool run_queue = false; 304 305 spin_lock_irqsave(&q->queue_lock, flags); 306 if (WARN_ON_ONCE(q->quiesce_depth <= 0)) { 307 ; 308 } else if (!--q->quiesce_depth) { 309 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); 310 run_queue = true; 311 } 312 spin_unlock_irqrestore(&q->queue_lock, flags); 313 314 /* dispatch requests which are inserted during quiescing */ 315 if (run_queue) 316 blk_mq_run_hw_queues(q, true); 317 } 318 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); 319 320 void blk_mq_wake_waiters(struct request_queue *q) 321 { 322 struct blk_mq_hw_ctx *hctx; 323 unsigned int i; 324 325 queue_for_each_hw_ctx(q, hctx, i) 326 if (blk_mq_hw_queue_mapped(hctx)) 327 blk_mq_tag_wakeup_all(hctx->tags, true); 328 } 329 330 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, 331 struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns) 332 { 333 struct blk_mq_ctx *ctx = data->ctx; 334 struct blk_mq_hw_ctx *hctx = data->hctx; 335 struct request_queue *q = data->q; 336 struct request *rq = tags->static_rqs[tag]; 337 338 rq->q = q; 339 rq->mq_ctx = ctx; 340 rq->mq_hctx = hctx; 341 rq->cmd_flags = data->cmd_flags; 342 343 if (data->flags & BLK_MQ_REQ_PM) 344 data->rq_flags |= RQF_PM; 345 if (blk_queue_io_stat(q)) 346 data->rq_flags |= RQF_IO_STAT; 347 rq->rq_flags = data->rq_flags; 348 349 if (!(data->rq_flags & RQF_ELV)) { 350 rq->tag = tag; 351 rq->internal_tag = BLK_MQ_NO_TAG; 352 } else { 353 rq->tag = BLK_MQ_NO_TAG; 354 rq->internal_tag = tag; 355 } 356 rq->timeout = 0; 357 358 if (blk_mq_need_time_stamp(rq)) 359 rq->start_time_ns = ktime_get_ns(); 360 else 361 rq->start_time_ns = 0; 362 rq->rq_disk = NULL; 363 rq->part = NULL; 364 #ifdef CONFIG_BLK_RQ_ALLOC_TIME 365 rq->alloc_time_ns = alloc_time_ns; 366 #endif 367 rq->io_start_time_ns = 0; 368 rq->stats_sectors = 0; 369 rq->nr_phys_segments = 0; 370 #if defined(CONFIG_BLK_DEV_INTEGRITY) 371 rq->nr_integrity_segments = 0; 372 #endif 373 rq->end_io = NULL; 374 rq->end_io_data = NULL; 375 376 blk_crypto_rq_set_defaults(rq); 377 INIT_LIST_HEAD(&rq->queuelist); 378 /* tag was already set */ 379 WRITE_ONCE(rq->deadline, 0); 380 refcount_set(&rq->ref, 1); 381 382 if (rq->rq_flags & RQF_ELV) { 383 struct elevator_queue *e = data->q->elevator; 384 385 rq->elv.icq = NULL; 386 INIT_HLIST_NODE(&rq->hash); 387 RB_CLEAR_NODE(&rq->rb_node); 388 389 if (!op_is_flush(data->cmd_flags) && 390 e->type->ops.prepare_request) { 391 if (e->type->icq_cache) 392 blk_mq_sched_assign_ioc(rq); 393 394 e->type->ops.prepare_request(rq); 395 rq->rq_flags |= RQF_ELVPRIV; 396 } 397 } 398 399 return rq; 400 } 401 402 static inline struct request * 403 __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, 404 u64 alloc_time_ns) 405 { 406 unsigned int tag, tag_offset; 407 struct blk_mq_tags *tags; 408 struct request *rq; 409 unsigned long tag_mask; 410 int i, nr = 0; 411 412 tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset); 413 if (unlikely(!tag_mask)) 414 return NULL; 415 416 tags = blk_mq_tags_from_data(data); 417 for (i = 0; tag_mask; i++) { 418 if (!(tag_mask & (1UL << i))) 419 continue; 420 tag = tag_offset + i; 421 prefetch(tags->static_rqs[tag]); 422 tag_mask &= ~(1UL << i); 423 rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns); 424 rq_list_add(data->cached_rq, rq); 425 nr++; 426 } 427 /* caller already holds a reference, add for remainder */ 428 percpu_ref_get_many(&data->q->q_usage_counter, nr - 1); 429 data->nr_tags -= nr; 
430 431 return rq_list_pop(data->cached_rq); 432 } 433 434 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) 435 { 436 struct request_queue *q = data->q; 437 u64 alloc_time_ns = 0; 438 struct request *rq; 439 unsigned int tag; 440 441 /* alloc_time includes depth and tag waits */ 442 if (blk_queue_rq_alloc_time(q)) 443 alloc_time_ns = ktime_get_ns(); 444 445 if (data->cmd_flags & REQ_NOWAIT) 446 data->flags |= BLK_MQ_REQ_NOWAIT; 447 448 if (q->elevator) { 449 struct elevator_queue *e = q->elevator; 450 451 data->rq_flags |= RQF_ELV; 452 453 /* 454 * Flush/passthrough requests are special and go directly to the 455 * dispatch list. Don't include reserved tags in the 456 * limiting, as it isn't useful. 457 */ 458 if (!op_is_flush(data->cmd_flags) && 459 !blk_op_is_passthrough(data->cmd_flags) && 460 e->type->ops.limit_depth && 461 !(data->flags & BLK_MQ_REQ_RESERVED)) 462 e->type->ops.limit_depth(data->cmd_flags, data); 463 } 464 465 retry: 466 data->ctx = blk_mq_get_ctx(q); 467 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); 468 if (!(data->rq_flags & RQF_ELV)) 469 blk_mq_tag_busy(data->hctx); 470 471 /* 472 * Try batched alloc if we want more than 1 tag. 473 */ 474 if (data->nr_tags > 1) { 475 rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns); 476 if (rq) 477 return rq; 478 data->nr_tags = 1; 479 } 480 481 /* 482 * Waiting allocations only fail because of an inactive hctx. In that 483 * case just retry the hctx assignment and tag allocation as CPU hotplug 484 * should have migrated us to an online CPU by now. 485 */ 486 tag = blk_mq_get_tag(data); 487 if (tag == BLK_MQ_NO_TAG) { 488 if (data->flags & BLK_MQ_REQ_NOWAIT) 489 return NULL; 490 /* 491 * Give up the CPU and sleep for a random short time to 492 * ensure that thread using a realtime scheduling class 493 * are migrated off the CPU, and thus off the hctx that 494 * is going away. 495 */ 496 msleep(3); 497 goto retry; 498 } 499 500 return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag, 501 alloc_time_ns); 502 } 503 504 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, 505 blk_mq_req_flags_t flags) 506 { 507 struct blk_mq_alloc_data data = { 508 .q = q, 509 .flags = flags, 510 .cmd_flags = op, 511 .nr_tags = 1, 512 }; 513 struct request *rq; 514 int ret; 515 516 ret = blk_queue_enter(q, flags); 517 if (ret) 518 return ERR_PTR(ret); 519 520 rq = __blk_mq_alloc_requests(&data); 521 if (!rq) 522 goto out_queue_exit; 523 rq->__data_len = 0; 524 rq->__sector = (sector_t) -1; 525 rq->bio = rq->biotail = NULL; 526 return rq; 527 out_queue_exit: 528 blk_queue_exit(q); 529 return ERR_PTR(-EWOULDBLOCK); 530 } 531 EXPORT_SYMBOL(blk_mq_alloc_request); 532 533 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, 534 unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx) 535 { 536 struct blk_mq_alloc_data data = { 537 .q = q, 538 .flags = flags, 539 .cmd_flags = op, 540 .nr_tags = 1, 541 }; 542 u64 alloc_time_ns = 0; 543 unsigned int cpu; 544 unsigned int tag; 545 int ret; 546 547 /* alloc_time includes depth and tag waits */ 548 if (blk_queue_rq_alloc_time(q)) 549 alloc_time_ns = ktime_get_ns(); 550 551 /* 552 * If the tag allocator sleeps we could get an allocation for a 553 * different hardware context. No need to complicate the low level 554 * allocator for this for the rare use case of a command tied to 555 * a specific queue. 
556 */ 557 if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)))) 558 return ERR_PTR(-EINVAL); 559 560 if (hctx_idx >= q->nr_hw_queues) 561 return ERR_PTR(-EIO); 562 563 ret = blk_queue_enter(q, flags); 564 if (ret) 565 return ERR_PTR(ret); 566 567 /* 568 * Check if the hardware context is actually mapped to anything. 569 * If not tell the caller that it should skip this queue. 570 */ 571 ret = -EXDEV; 572 data.hctx = q->queue_hw_ctx[hctx_idx]; 573 if (!blk_mq_hw_queue_mapped(data.hctx)) 574 goto out_queue_exit; 575 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); 576 data.ctx = __blk_mq_get_ctx(q, cpu); 577 578 if (!q->elevator) 579 blk_mq_tag_busy(data.hctx); 580 else 581 data.rq_flags |= RQF_ELV; 582 583 ret = -EWOULDBLOCK; 584 tag = blk_mq_get_tag(&data); 585 if (tag == BLK_MQ_NO_TAG) 586 goto out_queue_exit; 587 return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag, 588 alloc_time_ns); 589 590 out_queue_exit: 591 blk_queue_exit(q); 592 return ERR_PTR(ret); 593 } 594 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 595 596 static void __blk_mq_free_request(struct request *rq) 597 { 598 struct request_queue *q = rq->q; 599 struct blk_mq_ctx *ctx = rq->mq_ctx; 600 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 601 const int sched_tag = rq->internal_tag; 602 603 blk_crypto_free_request(rq); 604 blk_pm_mark_last_busy(rq); 605 rq->mq_hctx = NULL; 606 if (rq->tag != BLK_MQ_NO_TAG) 607 blk_mq_put_tag(hctx->tags, ctx, rq->tag); 608 if (sched_tag != BLK_MQ_NO_TAG) 609 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); 610 blk_mq_sched_restart(hctx); 611 blk_queue_exit(q); 612 } 613 614 void blk_mq_free_request(struct request *rq) 615 { 616 struct request_queue *q = rq->q; 617 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 618 619 if (rq->rq_flags & RQF_ELVPRIV) { 620 struct elevator_queue *e = q->elevator; 621 622 if (e->type->ops.finish_request) 623 e->type->ops.finish_request(rq); 624 if (rq->elv.icq) { 625 put_io_context(rq->elv.icq->ioc); 626 rq->elv.icq = NULL; 627 } 628 } 629 630 if (rq->rq_flags & RQF_MQ_INFLIGHT) 631 __blk_mq_dec_active_requests(hctx); 632 633 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) 634 laptop_io_completion(q->disk->bdi); 635 636 rq_qos_done(q, rq); 637 638 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 639 if (refcount_dec_and_test(&rq->ref)) 640 __blk_mq_free_request(rq); 641 } 642 EXPORT_SYMBOL_GPL(blk_mq_free_request); 643 644 void blk_mq_free_plug_rqs(struct blk_plug *plug) 645 { 646 struct request *rq; 647 648 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) 649 blk_mq_free_request(rq); 650 } 651 652 static void req_bio_endio(struct request *rq, struct bio *bio, 653 unsigned int nbytes, blk_status_t error) 654 { 655 if (unlikely(error)) { 656 bio->bi_status = error; 657 } else if (req_op(rq) == REQ_OP_ZONE_APPEND) { 658 /* 659 * Partial zone append completions cannot be supported as the 660 * BIO fragments may end up not being written sequentially. 
661 */ 662 if (bio->bi_iter.bi_size != nbytes) 663 bio->bi_status = BLK_STS_IOERR; 664 else 665 bio->bi_iter.bi_sector = rq->__sector; 666 } 667 668 bio_advance(bio, nbytes); 669 670 if (unlikely(rq->rq_flags & RQF_QUIET)) 671 bio_set_flag(bio, BIO_QUIET); 672 /* don't actually finish bio if it's part of flush sequence */ 673 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) 674 bio_endio(bio); 675 } 676 677 static void blk_account_io_completion(struct request *req, unsigned int bytes) 678 { 679 if (req->part && blk_do_io_stat(req)) { 680 const int sgrp = op_stat_group(req_op(req)); 681 682 part_stat_lock(); 683 part_stat_add(req->part, sectors[sgrp], bytes >> 9); 684 part_stat_unlock(); 685 } 686 } 687 688 /** 689 * blk_update_request - Complete multiple bytes without completing the request 690 * @req: the request being processed 691 * @error: block status code 692 * @nr_bytes: number of bytes to complete for @req 693 * 694 * Description: 695 * Ends I/O on a number of bytes attached to @req, but doesn't complete 696 * the request structure even if @req doesn't have leftover. 697 * If @req has leftover, sets it up for the next range of segments. 698 * 699 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 700 * %false return from this function. 701 * 702 * Note: 703 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function 704 * except in the consistency check at the end of this function. 705 * 706 * Return: 707 * %false - this request doesn't have any more data 708 * %true - this request has more data 709 **/ 710 bool blk_update_request(struct request *req, blk_status_t error, 711 unsigned int nr_bytes) 712 { 713 int total_bytes; 714 715 trace_block_rq_complete(req, error, nr_bytes); 716 717 if (!req->bio) 718 return false; 719 720 #ifdef CONFIG_BLK_DEV_INTEGRITY 721 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && 722 error == BLK_STS_OK) 723 req->q->integrity.profile->complete_fn(req, nr_bytes); 724 #endif 725 726 if (unlikely(error && !blk_rq_is_passthrough(req) && 727 !(req->rq_flags & RQF_QUIET))) 728 blk_print_req_error(req, error); 729 730 blk_account_io_completion(req, nr_bytes); 731 732 total_bytes = 0; 733 while (req->bio) { 734 struct bio *bio = req->bio; 735 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); 736 737 if (bio_bytes == bio->bi_iter.bi_size) 738 req->bio = bio->bi_next; 739 740 /* Completion has already been traced */ 741 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 742 req_bio_endio(req, bio, bio_bytes, error); 743 744 total_bytes += bio_bytes; 745 nr_bytes -= bio_bytes; 746 747 if (!nr_bytes) 748 break; 749 } 750 751 /* 752 * completely done 753 */ 754 if (!req->bio) { 755 /* 756 * Reset counters so that the request stacking driver 757 * can find how many bytes remain in the request 758 * later. 759 */ 760 req->__data_len = 0; 761 return false; 762 } 763 764 req->__data_len -= total_bytes; 765 766 /* update sector only for requests with clear definition of sector */ 767 if (!blk_rq_is_passthrough(req)) 768 req->__sector += total_bytes >> 9; 769 770 /* mixed attributes always follow the first bio */ 771 if (req->rq_flags & RQF_MIXED_MERGE) { 772 req->cmd_flags &= ~REQ_FAILFAST_MASK; 773 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 774 } 775 776 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { 777 /* 778 * If total number of sectors is less than the first segment 779 * size, something has gone terribly wrong. 
780 */ 781 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 782 blk_dump_rq_flags(req, "request botched"); 783 req->__data_len = blk_rq_cur_bytes(req); 784 } 785 786 /* recalculate the number of segments */ 787 req->nr_phys_segments = blk_recalc_rq_segments(req); 788 } 789 790 return true; 791 } 792 EXPORT_SYMBOL_GPL(blk_update_request); 793 794 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now) 795 { 796 if (rq->rq_flags & RQF_STATS) { 797 blk_mq_poll_stats_start(rq->q); 798 blk_stat_add(rq, now); 799 } 800 801 blk_mq_sched_completed_request(rq, now); 802 blk_account_io_done(rq, now); 803 } 804 805 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) 806 { 807 if (blk_mq_need_time_stamp(rq)) 808 __blk_mq_end_request_acct(rq, ktime_get_ns()); 809 810 if (rq->end_io) { 811 rq_qos_done(rq->q, rq); 812 rq->end_io(rq, error); 813 } else { 814 blk_mq_free_request(rq); 815 } 816 } 817 EXPORT_SYMBOL(__blk_mq_end_request); 818 819 void blk_mq_end_request(struct request *rq, blk_status_t error) 820 { 821 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 822 BUG(); 823 __blk_mq_end_request(rq, error); 824 } 825 EXPORT_SYMBOL(blk_mq_end_request); 826 827 #define TAG_COMP_BATCH 32 828 829 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, 830 int *tag_array, int nr_tags) 831 { 832 struct request_queue *q = hctx->queue; 833 834 /* 835 * All requests should have been marked as RQF_MQ_INFLIGHT, so 836 * update hctx->nr_active in batch 837 */ 838 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 839 __blk_mq_sub_active_requests(hctx, nr_tags); 840 841 blk_mq_put_tags(hctx->tags, tag_array, nr_tags); 842 percpu_ref_put_many(&q->q_usage_counter, nr_tags); 843 } 844 845 void blk_mq_end_request_batch(struct io_comp_batch *iob) 846 { 847 int tags[TAG_COMP_BATCH], nr_tags = 0; 848 struct blk_mq_hw_ctx *cur_hctx = NULL; 849 struct request *rq; 850 u64 now = 0; 851 852 if (iob->need_ts) 853 now = ktime_get_ns(); 854 855 while ((rq = rq_list_pop(&iob->req_list)) != NULL) { 856 prefetch(rq->bio); 857 prefetch(rq->rq_next); 858 859 blk_update_request(rq, BLK_STS_OK, blk_rq_bytes(rq)); 860 if (iob->need_ts) 861 __blk_mq_end_request_acct(rq, now); 862 863 rq_qos_done(rq->q, rq); 864 865 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 866 if (!refcount_dec_and_test(&rq->ref)) 867 continue; 868 869 blk_crypto_free_request(rq); 870 blk_pm_mark_last_busy(rq); 871 872 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) { 873 if (cur_hctx) 874 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); 875 nr_tags = 0; 876 cur_hctx = rq->mq_hctx; 877 } 878 tags[nr_tags++] = rq->tag; 879 } 880 881 if (nr_tags) 882 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); 883 } 884 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch); 885 886 static void blk_complete_reqs(struct llist_head *list) 887 { 888 struct llist_node *entry = llist_reverse_order(llist_del_all(list)); 889 struct request *rq, *next; 890 891 llist_for_each_entry_safe(rq, next, entry, ipi_list) 892 rq->q->mq_ops->complete(rq); 893 } 894 895 static __latent_entropy void blk_done_softirq(struct softirq_action *h) 896 { 897 blk_complete_reqs(this_cpu_ptr(&blk_cpu_done)); 898 } 899 900 static int blk_softirq_cpu_dead(unsigned int cpu) 901 { 902 blk_complete_reqs(&per_cpu(blk_cpu_done, cpu)); 903 return 0; 904 } 905 906 static void __blk_mq_complete_request_remote(void *data) 907 { 908 __raise_softirq_irqoff(BLOCK_SOFTIRQ); 909 } 910 911 static inline bool blk_mq_complete_need_ipi(struct request *rq) 912 { 913 int cpu = 
raw_smp_processor_id(); 914 915 if (!IS_ENABLED(CONFIG_SMP) || 916 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) 917 return false; 918 /* 919 * With force threaded interrupts enabled, raising softirq from an SMP 920 * function call will always result in waking the ksoftirqd thread. 921 * This is probably worse than completing the request on a different 922 * cache domain. 923 */ 924 if (force_irqthreads()) 925 return false; 926 927 /* same CPU or cache domain? Complete locally */ 928 if (cpu == rq->mq_ctx->cpu || 929 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) && 930 cpus_share_cache(cpu, rq->mq_ctx->cpu))) 931 return false; 932 933 /* don't try to IPI to an offline CPU */ 934 return cpu_online(rq->mq_ctx->cpu); 935 } 936 937 static void blk_mq_complete_send_ipi(struct request *rq) 938 { 939 struct llist_head *list; 940 unsigned int cpu; 941 942 cpu = rq->mq_ctx->cpu; 943 list = &per_cpu(blk_cpu_done, cpu); 944 if (llist_add(&rq->ipi_list, list)) { 945 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq); 946 smp_call_function_single_async(cpu, &rq->csd); 947 } 948 } 949 950 static void blk_mq_raise_softirq(struct request *rq) 951 { 952 struct llist_head *list; 953 954 preempt_disable(); 955 list = this_cpu_ptr(&blk_cpu_done); 956 if (llist_add(&rq->ipi_list, list)) 957 raise_softirq(BLOCK_SOFTIRQ); 958 preempt_enable(); 959 } 960 961 bool blk_mq_complete_request_remote(struct request *rq) 962 { 963 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); 964 965 /* 966 * For a polled request, always complete locallly, it's pointless 967 * to redirect the completion. 968 */ 969 if (rq->cmd_flags & REQ_POLLED) 970 return false; 971 972 if (blk_mq_complete_need_ipi(rq)) { 973 blk_mq_complete_send_ipi(rq); 974 return true; 975 } 976 977 if (rq->q->nr_hw_queues == 1) { 978 blk_mq_raise_softirq(rq); 979 return true; 980 } 981 return false; 982 } 983 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); 984 985 /** 986 * blk_mq_complete_request - end I/O on a request 987 * @rq: the request being processed 988 * 989 * Description: 990 * Complete a request by scheduling the ->complete_rq operation. 991 **/ 992 void blk_mq_complete_request(struct request *rq) 993 { 994 if (!blk_mq_complete_request_remote(rq)) 995 rq->q->mq_ops->complete(rq); 996 } 997 EXPORT_SYMBOL(blk_mq_complete_request); 998 999 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) 1000 __releases(hctx->srcu) 1001 { 1002 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) 1003 rcu_read_unlock(); 1004 else 1005 srcu_read_unlock(hctx->srcu, srcu_idx); 1006 } 1007 1008 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) 1009 __acquires(hctx->srcu) 1010 { 1011 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 1012 /* shut up gcc false positive */ 1013 *srcu_idx = 0; 1014 rcu_read_lock(); 1015 } else 1016 *srcu_idx = srcu_read_lock(hctx->srcu); 1017 } 1018 1019 /** 1020 * blk_mq_start_request - Start processing a request 1021 * @rq: Pointer to request to be started 1022 * 1023 * Function used by device drivers to notify the block layer that a request 1024 * is going to be processed now, so blk layer can do proper initializations 1025 * such as starting the timeout timer. 
1026 */ 1027 void blk_mq_start_request(struct request *rq) 1028 { 1029 struct request_queue *q = rq->q; 1030 1031 trace_block_rq_issue(rq); 1032 1033 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 1034 u64 start_time; 1035 #ifdef CONFIG_BLK_CGROUP 1036 if (rq->bio) 1037 start_time = bio_issue_time(&rq->bio->bi_issue); 1038 else 1039 #endif 1040 start_time = ktime_get_ns(); 1041 rq->io_start_time_ns = start_time; 1042 rq->stats_sectors = blk_rq_sectors(rq); 1043 rq->rq_flags |= RQF_STATS; 1044 rq_qos_issue(q, rq); 1045 } 1046 1047 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); 1048 1049 blk_add_timer(rq); 1050 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); 1051 1052 #ifdef CONFIG_BLK_DEV_INTEGRITY 1053 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) 1054 q->integrity.profile->prepare_fn(rq); 1055 #endif 1056 if (rq->bio && rq->bio->bi_opf & REQ_POLLED) 1057 WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq)); 1058 } 1059 EXPORT_SYMBOL(blk_mq_start_request); 1060 1061 static void __blk_mq_requeue_request(struct request *rq) 1062 { 1063 struct request_queue *q = rq->q; 1064 1065 blk_mq_put_driver_tag(rq); 1066 1067 trace_block_rq_requeue(rq); 1068 rq_qos_requeue(q, rq); 1069 1070 if (blk_mq_request_started(rq)) { 1071 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 1072 rq->rq_flags &= ~RQF_TIMED_OUT; 1073 } 1074 } 1075 1076 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 1077 { 1078 __blk_mq_requeue_request(rq); 1079 1080 /* this request will be re-inserted to io scheduler queue */ 1081 blk_mq_sched_requeue_request(rq); 1082 1083 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); 1084 } 1085 EXPORT_SYMBOL(blk_mq_requeue_request); 1086 1087 static void blk_mq_requeue_work(struct work_struct *work) 1088 { 1089 struct request_queue *q = 1090 container_of(work, struct request_queue, requeue_work.work); 1091 LIST_HEAD(rq_list); 1092 struct request *rq, *next; 1093 1094 spin_lock_irq(&q->requeue_lock); 1095 list_splice_init(&q->requeue_list, &rq_list); 1096 spin_unlock_irq(&q->requeue_lock); 1097 1098 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 1099 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) 1100 continue; 1101 1102 rq->rq_flags &= ~RQF_SOFTBARRIER; 1103 list_del_init(&rq->queuelist); 1104 /* 1105 * If RQF_DONTPREP, rq has contained some driver specific 1106 * data, so insert it to hctx dispatch list to avoid any 1107 * merge. 1108 */ 1109 if (rq->rq_flags & RQF_DONTPREP) 1110 blk_mq_request_bypass_insert(rq, false, false); 1111 else 1112 blk_mq_sched_insert_request(rq, true, false, false); 1113 } 1114 1115 while (!list_empty(&rq_list)) { 1116 rq = list_entry(rq_list.next, struct request, queuelist); 1117 list_del_init(&rq->queuelist); 1118 blk_mq_sched_insert_request(rq, false, false, false); 1119 } 1120 1121 blk_mq_run_hw_queues(q, false); 1122 } 1123 1124 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, 1125 bool kick_requeue_list) 1126 { 1127 struct request_queue *q = rq->q; 1128 unsigned long flags; 1129 1130 /* 1131 * We abuse this flag that is otherwise used by the I/O scheduler to 1132 * request head insertion from the workqueue. 
1133 */ 1134 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 1135 1136 spin_lock_irqsave(&q->requeue_lock, flags); 1137 if (at_head) { 1138 rq->rq_flags |= RQF_SOFTBARRIER; 1139 list_add(&rq->queuelist, &q->requeue_list); 1140 } else { 1141 list_add_tail(&rq->queuelist, &q->requeue_list); 1142 } 1143 spin_unlock_irqrestore(&q->requeue_lock, flags); 1144 1145 if (kick_requeue_list) 1146 blk_mq_kick_requeue_list(q); 1147 } 1148 1149 void blk_mq_kick_requeue_list(struct request_queue *q) 1150 { 1151 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); 1152 } 1153 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 1154 1155 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 1156 unsigned long msecs) 1157 { 1158 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 1159 msecs_to_jiffies(msecs)); 1160 } 1161 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 1162 1163 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, 1164 void *priv, bool reserved) 1165 { 1166 /* 1167 * If we find a request that isn't idle and the queue matches, 1168 * we know the queue is busy. Return false to stop the iteration. 1169 */ 1170 if (blk_mq_request_started(rq) && rq->q == hctx->queue) { 1171 bool *busy = priv; 1172 1173 *busy = true; 1174 return false; 1175 } 1176 1177 return true; 1178 } 1179 1180 bool blk_mq_queue_inflight(struct request_queue *q) 1181 { 1182 bool busy = false; 1183 1184 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); 1185 return busy; 1186 } 1187 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight); 1188 1189 static void blk_mq_rq_timed_out(struct request *req, bool reserved) 1190 { 1191 req->rq_flags |= RQF_TIMED_OUT; 1192 if (req->q->mq_ops->timeout) { 1193 enum blk_eh_timer_return ret; 1194 1195 ret = req->q->mq_ops->timeout(req, reserved); 1196 if (ret == BLK_EH_DONE) 1197 return; 1198 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); 1199 } 1200 1201 blk_add_timer(req); 1202 } 1203 1204 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) 1205 { 1206 unsigned long deadline; 1207 1208 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) 1209 return false; 1210 if (rq->rq_flags & RQF_TIMED_OUT) 1211 return false; 1212 1213 deadline = READ_ONCE(rq->deadline); 1214 if (time_after_eq(jiffies, deadline)) 1215 return true; 1216 1217 if (*next == 0) 1218 *next = deadline; 1219 else if (time_after(*next, deadline)) 1220 *next = deadline; 1221 return false; 1222 } 1223 1224 void blk_mq_put_rq_ref(struct request *rq) 1225 { 1226 if (is_flush_rq(rq)) 1227 rq->end_io(rq, 0); 1228 else if (refcount_dec_and_test(&rq->ref)) 1229 __blk_mq_free_request(rq); 1230 } 1231 1232 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, 1233 struct request *rq, void *priv, bool reserved) 1234 { 1235 unsigned long *next = priv; 1236 1237 /* 1238 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot 1239 * be reallocated underneath the timeout handler's processing, then 1240 * the expire check is reliable. If the request is not expired, then 1241 * it was completed and reallocated as a new request after returning 1242 * from blk_mq_check_expired(). 
1243 */ 1244 if (blk_mq_req_expired(rq, next)) 1245 blk_mq_rq_timed_out(rq, reserved); 1246 return true; 1247 } 1248 1249 static void blk_mq_timeout_work(struct work_struct *work) 1250 { 1251 struct request_queue *q = 1252 container_of(work, struct request_queue, timeout_work); 1253 unsigned long next = 0; 1254 struct blk_mq_hw_ctx *hctx; 1255 int i; 1256 1257 /* A deadlock might occur if a request is stuck requiring a 1258 * timeout at the same time a queue freeze is waiting 1259 * completion, since the timeout code would not be able to 1260 * acquire the queue reference here. 1261 * 1262 * That's why we don't use blk_queue_enter here; instead, we use 1263 * percpu_ref_tryget directly, because we need to be able to 1264 * obtain a reference even in the short window between the queue 1265 * starting to freeze, by dropping the first reference in 1266 * blk_freeze_queue_start, and the moment the last request is 1267 * consumed, marked by the instant q_usage_counter reaches 1268 * zero. 1269 */ 1270 if (!percpu_ref_tryget(&q->q_usage_counter)) 1271 return; 1272 1273 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); 1274 1275 if (next != 0) { 1276 mod_timer(&q->timeout, next); 1277 } else { 1278 /* 1279 * Request timeouts are handled as a forward rolling timer. If 1280 * we end up here it means that no requests are pending and 1281 * also that no request has been pending for a while. Mark 1282 * each hctx as idle. 1283 */ 1284 queue_for_each_hw_ctx(q, hctx, i) { 1285 /* the hctx may be unmapped, so check it here */ 1286 if (blk_mq_hw_queue_mapped(hctx)) 1287 blk_mq_tag_idle(hctx); 1288 } 1289 } 1290 blk_queue_exit(q); 1291 } 1292 1293 struct flush_busy_ctx_data { 1294 struct blk_mq_hw_ctx *hctx; 1295 struct list_head *list; 1296 }; 1297 1298 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) 1299 { 1300 struct flush_busy_ctx_data *flush_data = data; 1301 struct blk_mq_hw_ctx *hctx = flush_data->hctx; 1302 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1303 enum hctx_type type = hctx->type; 1304 1305 spin_lock(&ctx->lock); 1306 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); 1307 sbitmap_clear_bit(sb, bitnr); 1308 spin_unlock(&ctx->lock); 1309 return true; 1310 } 1311 1312 /* 1313 * Process software queues that have been marked busy, splicing them 1314 * to the for-dispatch 1315 */ 1316 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) 1317 { 1318 struct flush_busy_ctx_data data = { 1319 .hctx = hctx, 1320 .list = list, 1321 }; 1322 1323 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); 1324 } 1325 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); 1326 1327 struct dispatch_rq_data { 1328 struct blk_mq_hw_ctx *hctx; 1329 struct request *rq; 1330 }; 1331 1332 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, 1333 void *data) 1334 { 1335 struct dispatch_rq_data *dispatch_data = data; 1336 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; 1337 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; 1338 enum hctx_type type = hctx->type; 1339 1340 spin_lock(&ctx->lock); 1341 if (!list_empty(&ctx->rq_lists[type])) { 1342 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); 1343 list_del_init(&dispatch_data->rq->queuelist); 1344 if (list_empty(&ctx->rq_lists[type])) 1345 sbitmap_clear_bit(sb, bitnr); 1346 } 1347 spin_unlock(&ctx->lock); 1348 1349 return !dispatch_data->rq; 1350 } 1351 1352 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, 1353 struct blk_mq_ctx *start) 1354 { 1355 
unsigned off = start ? start->index_hw[hctx->type] : 0; 1356 struct dispatch_rq_data data = { 1357 .hctx = hctx, 1358 .rq = NULL, 1359 }; 1360 1361 __sbitmap_for_each_set(&hctx->ctx_map, off, 1362 dispatch_rq_from_ctx, &data); 1363 1364 return data.rq; 1365 } 1366 1367 static bool __blk_mq_alloc_driver_tag(struct request *rq) 1368 { 1369 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; 1370 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; 1371 int tag; 1372 1373 blk_mq_tag_busy(rq->mq_hctx); 1374 1375 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { 1376 bt = &rq->mq_hctx->tags->breserved_tags; 1377 tag_offset = 0; 1378 } else { 1379 if (!hctx_may_queue(rq->mq_hctx, bt)) 1380 return false; 1381 } 1382 1383 tag = __sbitmap_queue_get(bt); 1384 if (tag == BLK_MQ_NO_TAG) 1385 return false; 1386 1387 rq->tag = tag + tag_offset; 1388 return true; 1389 } 1390 1391 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) 1392 { 1393 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) 1394 return false; 1395 1396 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && 1397 !(rq->rq_flags & RQF_MQ_INFLIGHT)) { 1398 rq->rq_flags |= RQF_MQ_INFLIGHT; 1399 __blk_mq_inc_active_requests(hctx); 1400 } 1401 hctx->tags->rqs[rq->tag] = rq; 1402 return true; 1403 } 1404 1405 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, 1406 int flags, void *key) 1407 { 1408 struct blk_mq_hw_ctx *hctx; 1409 1410 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 1411 1412 spin_lock(&hctx->dispatch_wait_lock); 1413 if (!list_empty(&wait->entry)) { 1414 struct sbitmap_queue *sbq; 1415 1416 list_del_init(&wait->entry); 1417 sbq = &hctx->tags->bitmap_tags; 1418 atomic_dec(&sbq->ws_active); 1419 } 1420 spin_unlock(&hctx->dispatch_wait_lock); 1421 1422 blk_mq_run_hw_queue(hctx, true); 1423 return 1; 1424 } 1425 1426 /* 1427 * Mark us waiting for a tag. For shared tags, this involves hooking us into 1428 * the tag wakeups. For non-shared tags, we can simply mark us needing a 1429 * restart. For both cases, take care to check the condition again after 1430 * marking us as waiting. 1431 */ 1432 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, 1433 struct request *rq) 1434 { 1435 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; 1436 struct wait_queue_head *wq; 1437 wait_queue_entry_t *wait; 1438 bool ret; 1439 1440 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 1441 blk_mq_sched_mark_restart_hctx(hctx); 1442 1443 /* 1444 * It's possible that a tag was freed in the window between the 1445 * allocation failure and adding the hardware queue to the wait 1446 * queue. 1447 * 1448 * Don't clear RESTART here, someone else could have set it. 1449 * At most this will cost an extra queue run. 1450 */ 1451 return blk_mq_get_driver_tag(rq); 1452 } 1453 1454 wait = &hctx->dispatch_wait; 1455 if (!list_empty_careful(&wait->entry)) 1456 return false; 1457 1458 wq = &bt_wait_ptr(sbq, hctx)->wait; 1459 1460 spin_lock_irq(&wq->lock); 1461 spin_lock(&hctx->dispatch_wait_lock); 1462 if (!list_empty(&wait->entry)) { 1463 spin_unlock(&hctx->dispatch_wait_lock); 1464 spin_unlock_irq(&wq->lock); 1465 return false; 1466 } 1467 1468 atomic_inc(&sbq->ws_active); 1469 wait->flags &= ~WQ_FLAG_EXCLUSIVE; 1470 __add_wait_queue(wq, wait); 1471 1472 /* 1473 * It's possible that a tag was freed in the window between the 1474 * allocation failure and adding the hardware queue to the wait 1475 * queue. 
1476 */ 1477 ret = blk_mq_get_driver_tag(rq); 1478 if (!ret) { 1479 spin_unlock(&hctx->dispatch_wait_lock); 1480 spin_unlock_irq(&wq->lock); 1481 return false; 1482 } 1483 1484 /* 1485 * We got a tag, remove ourselves from the wait queue to ensure 1486 * someone else gets the wakeup. 1487 */ 1488 list_del_init(&wait->entry); 1489 atomic_dec(&sbq->ws_active); 1490 spin_unlock(&hctx->dispatch_wait_lock); 1491 spin_unlock_irq(&wq->lock); 1492 1493 return true; 1494 } 1495 1496 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8 1497 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4 1498 /* 1499 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA): 1500 * - EWMA is one simple way to compute running average value 1501 * - weight(7/8 and 1/8) is applied so that it can decrease exponentially 1502 * - take 4 as factor for avoiding to get too small(0) result, and this 1503 * factor doesn't matter because EWMA decreases exponentially 1504 */ 1505 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) 1506 { 1507 unsigned int ewma; 1508 1509 ewma = hctx->dispatch_busy; 1510 1511 if (!ewma && !busy) 1512 return; 1513 1514 ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1; 1515 if (busy) 1516 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR; 1517 ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT; 1518 1519 hctx->dispatch_busy = ewma; 1520 } 1521 1522 #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ 1523 1524 static void blk_mq_handle_dev_resource(struct request *rq, 1525 struct list_head *list) 1526 { 1527 struct request *next = 1528 list_first_entry_or_null(list, struct request, queuelist); 1529 1530 /* 1531 * If an I/O scheduler has been configured and we got a driver tag for 1532 * the next request already, free it. 1533 */ 1534 if (next) 1535 blk_mq_put_driver_tag(next); 1536 1537 list_add(&rq->queuelist, list); 1538 __blk_mq_requeue_request(rq); 1539 } 1540 1541 static void blk_mq_handle_zone_resource(struct request *rq, 1542 struct list_head *zone_list) 1543 { 1544 /* 1545 * If we end up here it is because we cannot dispatch a request to a 1546 * specific zone due to LLD level zone-write locking or other zone 1547 * related resource not being available. In this case, set the request 1548 * aside in zone_list for retrying it later. 1549 */ 1550 list_add(&rq->queuelist, zone_list); 1551 __blk_mq_requeue_request(rq); 1552 } 1553 1554 enum prep_dispatch { 1555 PREP_DISPATCH_OK, 1556 PREP_DISPATCH_NO_TAG, 1557 PREP_DISPATCH_NO_BUDGET, 1558 }; 1559 1560 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, 1561 bool need_budget) 1562 { 1563 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1564 int budget_token = -1; 1565 1566 if (need_budget) { 1567 budget_token = blk_mq_get_dispatch_budget(rq->q); 1568 if (budget_token < 0) { 1569 blk_mq_put_driver_tag(rq); 1570 return PREP_DISPATCH_NO_BUDGET; 1571 } 1572 blk_mq_set_rq_budget_token(rq, budget_token); 1573 } 1574 1575 if (!blk_mq_get_driver_tag(rq)) { 1576 /* 1577 * The initial allocation attempt failed, so we need to 1578 * rerun the hardware queue when a tag is freed. The 1579 * waitqueue takes care of that. If the queue is run 1580 * before we add this entry back on the dispatch list, 1581 * we'll re-run it below. 
1582 */ 1583 if (!blk_mq_mark_tag_wait(hctx, rq)) { 1584 /* 1585 * All budgets not got from this function will be put 1586 * together during handling partial dispatch 1587 */ 1588 if (need_budget) 1589 blk_mq_put_dispatch_budget(rq->q, budget_token); 1590 return PREP_DISPATCH_NO_TAG; 1591 } 1592 } 1593 1594 return PREP_DISPATCH_OK; 1595 } 1596 1597 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */ 1598 static void blk_mq_release_budgets(struct request_queue *q, 1599 struct list_head *list) 1600 { 1601 struct request *rq; 1602 1603 list_for_each_entry(rq, list, queuelist) { 1604 int budget_token = blk_mq_get_rq_budget_token(rq); 1605 1606 if (budget_token >= 0) 1607 blk_mq_put_dispatch_budget(q, budget_token); 1608 } 1609 } 1610 1611 /* 1612 * Returns true if we did some work AND can potentially do more. 1613 */ 1614 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, 1615 unsigned int nr_budgets) 1616 { 1617 enum prep_dispatch prep; 1618 struct request_queue *q = hctx->queue; 1619 struct request *rq, *nxt; 1620 int errors, queued; 1621 blk_status_t ret = BLK_STS_OK; 1622 LIST_HEAD(zone_list); 1623 bool needs_resource = false; 1624 1625 if (list_empty(list)) 1626 return false; 1627 1628 /* 1629 * Now process all the entries, sending them to the driver. 1630 */ 1631 errors = queued = 0; 1632 do { 1633 struct blk_mq_queue_data bd; 1634 1635 rq = list_first_entry(list, struct request, queuelist); 1636 1637 WARN_ON_ONCE(hctx != rq->mq_hctx); 1638 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); 1639 if (prep != PREP_DISPATCH_OK) 1640 break; 1641 1642 list_del_init(&rq->queuelist); 1643 1644 bd.rq = rq; 1645 1646 /* 1647 * Flag last if we have no more requests, or if we have more 1648 * but can't assign a driver tag to it. 1649 */ 1650 if (list_empty(list)) 1651 bd.last = true; 1652 else { 1653 nxt = list_first_entry(list, struct request, queuelist); 1654 bd.last = !blk_mq_get_driver_tag(nxt); 1655 } 1656 1657 /* 1658 * once the request is queued to lld, no need to cover the 1659 * budget any more 1660 */ 1661 if (nr_budgets) 1662 nr_budgets--; 1663 ret = q->mq_ops->queue_rq(hctx, &bd); 1664 switch (ret) { 1665 case BLK_STS_OK: 1666 queued++; 1667 break; 1668 case BLK_STS_RESOURCE: 1669 needs_resource = true; 1670 fallthrough; 1671 case BLK_STS_DEV_RESOURCE: 1672 blk_mq_handle_dev_resource(rq, list); 1673 goto out; 1674 case BLK_STS_ZONE_RESOURCE: 1675 /* 1676 * Move the request to zone_list and keep going through 1677 * the dispatch list to find more requests the drive can 1678 * accept. 1679 */ 1680 blk_mq_handle_zone_resource(rq, &zone_list); 1681 needs_resource = true; 1682 break; 1683 default: 1684 errors++; 1685 blk_mq_end_request(rq, ret); 1686 } 1687 } while (!list_empty(list)); 1688 out: 1689 if (!list_empty(&zone_list)) 1690 list_splice_tail_init(&zone_list, list); 1691 1692 /* If we didn't flush the entire list, we could have told the driver 1693 * there was more coming, but that turned out to be a lie. 1694 */ 1695 if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued) 1696 q->mq_ops->commit_rqs(hctx); 1697 /* 1698 * Any items that need requeuing? Stuff them into hctx->dispatch, 1699 * that is where we will continue on next queue run. 
1700 */ 1701 if (!list_empty(list)) { 1702 bool needs_restart; 1703 /* For non-shared tags, the RESTART check will suffice */ 1704 bool no_tag = prep == PREP_DISPATCH_NO_TAG && 1705 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED); 1706 1707 if (nr_budgets) 1708 blk_mq_release_budgets(q, list); 1709 1710 spin_lock(&hctx->lock); 1711 list_splice_tail_init(list, &hctx->dispatch); 1712 spin_unlock(&hctx->lock); 1713 1714 /* 1715 * Order adding requests to hctx->dispatch and checking 1716 * SCHED_RESTART flag. The pair of this smp_mb() is the one 1717 * in blk_mq_sched_restart(). Avoid restart code path to 1718 * miss the new added requests to hctx->dispatch, meantime 1719 * SCHED_RESTART is observed here. 1720 */ 1721 smp_mb(); 1722 1723 /* 1724 * If SCHED_RESTART was set by the caller of this function and 1725 * it is no longer set that means that it was cleared by another 1726 * thread and hence that a queue rerun is needed. 1727 * 1728 * If 'no_tag' is set, that means that we failed getting 1729 * a driver tag with an I/O scheduler attached. If our dispatch 1730 * waitqueue is no longer active, ensure that we run the queue 1731 * AFTER adding our entries back to the list. 1732 * 1733 * If no I/O scheduler has been configured it is possible that 1734 * the hardware queue got stopped and restarted before requests 1735 * were pushed back onto the dispatch list. Rerun the queue to 1736 * avoid starvation. Notes: 1737 * - blk_mq_run_hw_queue() checks whether or not a queue has 1738 * been stopped before rerunning a queue. 1739 * - Some but not all block drivers stop a queue before 1740 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 1741 * and dm-rq. 1742 * 1743 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 1744 * bit is set, run queue after a delay to avoid IO stalls 1745 * that could otherwise occur if the queue is idle. We'll do 1746 * similar if we couldn't get budget or couldn't lock a zone 1747 * and SCHED_RESTART is set. 1748 */ 1749 needs_restart = blk_mq_sched_needs_restart(hctx); 1750 if (prep == PREP_DISPATCH_NO_BUDGET) 1751 needs_resource = true; 1752 if (!needs_restart || 1753 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 1754 blk_mq_run_hw_queue(hctx, true); 1755 else if (needs_restart && needs_resource) 1756 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 1757 1758 blk_mq_update_dispatch_busy(hctx, true); 1759 return false; 1760 } else 1761 blk_mq_update_dispatch_busy(hctx, false); 1762 1763 return (queued + errors) != 0; 1764 } 1765 1766 /** 1767 * __blk_mq_run_hw_queue - Run a hardware queue. 1768 * @hctx: Pointer to the hardware queue to run. 1769 * 1770 * Send pending requests to the hardware. 1771 */ 1772 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1773 { 1774 int srcu_idx; 1775 1776 /* 1777 * We can't run the queue inline with ints disabled. Ensure that 1778 * we catch bad users of this early. 1779 */ 1780 WARN_ON_ONCE(in_interrupt()); 1781 1782 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 1783 1784 hctx_lock(hctx, &srcu_idx); 1785 blk_mq_sched_dispatch_requests(hctx); 1786 hctx_unlock(hctx, srcu_idx); 1787 } 1788 1789 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) 1790 { 1791 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); 1792 1793 if (cpu >= nr_cpu_ids) 1794 cpu = cpumask_first(hctx->cpumask); 1795 return cpu; 1796 } 1797 1798 /* 1799 * It'd be great if the workqueue API had a way to pass 1800 * in a mask and had some smarts for more clever placement. 
1801 * For now we just round-robin here, switching for every 1802 * BLK_MQ_CPU_WORK_BATCH queued items. 1803 */ 1804 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 1805 { 1806 bool tried = false; 1807 int next_cpu = hctx->next_cpu; 1808 1809 if (hctx->queue->nr_hw_queues == 1) 1810 return WORK_CPU_UNBOUND; 1811 1812 if (--hctx->next_cpu_batch <= 0) { 1813 select_cpu: 1814 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, 1815 cpu_online_mask); 1816 if (next_cpu >= nr_cpu_ids) 1817 next_cpu = blk_mq_first_mapped_cpu(hctx); 1818 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 1819 } 1820 1821 /* 1822 * Do unbound schedule if we can't find a online CPU for this hctx, 1823 * and it should only happen in the path of handling CPU DEAD. 1824 */ 1825 if (!cpu_online(next_cpu)) { 1826 if (!tried) { 1827 tried = true; 1828 goto select_cpu; 1829 } 1830 1831 /* 1832 * Make sure to re-select CPU next time once after CPUs 1833 * in hctx->cpumask become online again. 1834 */ 1835 hctx->next_cpu = next_cpu; 1836 hctx->next_cpu_batch = 1; 1837 return WORK_CPU_UNBOUND; 1838 } 1839 1840 hctx->next_cpu = next_cpu; 1841 return next_cpu; 1842 } 1843 1844 /** 1845 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue. 1846 * @hctx: Pointer to the hardware queue to run. 1847 * @async: If we want to run the queue asynchronously. 1848 * @msecs: Milliseconds of delay to wait before running the queue. 1849 * 1850 * If !@async, try to run the queue now. Else, run the queue asynchronously and 1851 * with a delay of @msecs. 1852 */ 1853 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, 1854 unsigned long msecs) 1855 { 1856 if (unlikely(blk_mq_hctx_stopped(hctx))) 1857 return; 1858 1859 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 1860 int cpu = get_cpu(); 1861 if (cpumask_test_cpu(cpu, hctx->cpumask)) { 1862 __blk_mq_run_hw_queue(hctx); 1863 put_cpu(); 1864 return; 1865 } 1866 1867 put_cpu(); 1868 } 1869 1870 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 1871 msecs_to_jiffies(msecs)); 1872 } 1873 1874 /** 1875 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. 1876 * @hctx: Pointer to the hardware queue to run. 1877 * @msecs: Milliseconds of delay to wait before running the queue. 1878 * 1879 * Run a hardware queue asynchronously with a delay of @msecs. 1880 */ 1881 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 1882 { 1883 __blk_mq_delay_run_hw_queue(hctx, true, msecs); 1884 } 1885 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 1886 1887 /** 1888 * blk_mq_run_hw_queue - Start to run a hardware queue. 1889 * @hctx: Pointer to the hardware queue to run. 1890 * @async: If we want to run the queue asynchronously. 1891 * 1892 * Check if the request queue is not in a quiesced state and if there are 1893 * pending requests to be sent. If this is true, run the queue to send requests 1894 * to hardware. 1895 */ 1896 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1897 { 1898 int srcu_idx; 1899 bool need_run; 1900 1901 /* 1902 * When queue is quiesced, we may be switching io scheduler, or 1903 * updating nr_hw_queues, or other things, and we can't run queue 1904 * any more, even __blk_mq_hctx_has_pending() can't be called safely. 1905 * 1906 * And queue will be rerun in blk_mq_unquiesce_queue() if it is 1907 * quiesced. 
1908 */ 1909 hctx_lock(hctx, &srcu_idx); 1910 need_run = !blk_queue_quiesced(hctx->queue) && 1911 blk_mq_hctx_has_pending(hctx); 1912 hctx_unlock(hctx, srcu_idx); 1913 1914 if (need_run) 1915 __blk_mq_delay_run_hw_queue(hctx, async, 0); 1916 } 1917 EXPORT_SYMBOL(blk_mq_run_hw_queue); 1918 1919 /* 1920 * Is the request queue handled by an IO scheduler that does not respect 1921 * hardware queues when dispatching? 1922 */ 1923 static bool blk_mq_has_sqsched(struct request_queue *q) 1924 { 1925 struct elevator_queue *e = q->elevator; 1926 1927 if (e && e->type->ops.dispatch_request && 1928 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE)) 1929 return true; 1930 return false; 1931 } 1932 1933 /* 1934 * Return prefered queue to dispatch from (if any) for non-mq aware IO 1935 * scheduler. 1936 */ 1937 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 1938 { 1939 struct blk_mq_hw_ctx *hctx; 1940 1941 /* 1942 * If the IO scheduler does not respect hardware queues when 1943 * dispatching, we just don't bother with multiple HW queues and 1944 * dispatch from hctx for the current CPU since running multiple queues 1945 * just causes lock contention inside the scheduler and pointless cache 1946 * bouncing. 1947 */ 1948 hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, 1949 raw_smp_processor_id()); 1950 if (!blk_mq_hctx_stopped(hctx)) 1951 return hctx; 1952 return NULL; 1953 } 1954 1955 /** 1956 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 1957 * @q: Pointer to the request queue to run. 1958 * @async: If we want to run the queue asynchronously. 1959 */ 1960 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 1961 { 1962 struct blk_mq_hw_ctx *hctx, *sq_hctx; 1963 int i; 1964 1965 sq_hctx = NULL; 1966 if (blk_mq_has_sqsched(q)) 1967 sq_hctx = blk_mq_get_sq_hctx(q); 1968 queue_for_each_hw_ctx(q, hctx, i) { 1969 if (blk_mq_hctx_stopped(hctx)) 1970 continue; 1971 /* 1972 * Dispatch from this hctx either if there's no hctx preferred 1973 * by IO scheduler or if it has requests that bypass the 1974 * scheduler. 1975 */ 1976 if (!sq_hctx || sq_hctx == hctx || 1977 !list_empty_careful(&hctx->dispatch)) 1978 blk_mq_run_hw_queue(hctx, async); 1979 } 1980 } 1981 EXPORT_SYMBOL(blk_mq_run_hw_queues); 1982 1983 /** 1984 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 1985 * @q: Pointer to the request queue to run. 1986 * @msecs: Milliseconds of delay to wait before running the queues. 1987 */ 1988 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 1989 { 1990 struct blk_mq_hw_ctx *hctx, *sq_hctx; 1991 int i; 1992 1993 sq_hctx = NULL; 1994 if (blk_mq_has_sqsched(q)) 1995 sq_hctx = blk_mq_get_sq_hctx(q); 1996 queue_for_each_hw_ctx(q, hctx, i) { 1997 if (blk_mq_hctx_stopped(hctx)) 1998 continue; 1999 /* 2000 * Dispatch from this hctx either if there's no hctx preferred 2001 * by IO scheduler or if it has requests that bypass the 2002 * scheduler. 2003 */ 2004 if (!sq_hctx || sq_hctx == hctx || 2005 !list_empty_careful(&hctx->dispatch)) 2006 blk_mq_delay_run_hw_queue(hctx, msecs); 2007 } 2008 } 2009 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2010 2011 /** 2012 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 2013 * @q: request queue. 2014 * 2015 * The caller is responsible for serializing this function against 2016 * blk_mq_{start,stop}_hw_queue(). 
2017 */ 2018 bool blk_mq_queue_stopped(struct request_queue *q) 2019 { 2020 struct blk_mq_hw_ctx *hctx; 2021 int i; 2022 2023 queue_for_each_hw_ctx(q, hctx, i) 2024 if (blk_mq_hctx_stopped(hctx)) 2025 return true; 2026 2027 return false; 2028 } 2029 EXPORT_SYMBOL(blk_mq_queue_stopped); 2030 2031 /* 2032 * This function is often used for pausing .queue_rq() by driver when 2033 * there isn't enough resource or some conditions aren't satisfied, and 2034 * BLK_STS_RESOURCE is usually returned. 2035 * 2036 * We do not guarantee that dispatch can be drained or blocked 2037 * after blk_mq_stop_hw_queue() returns. Please use 2038 * blk_mq_quiesce_queue() for that requirement. 2039 */ 2040 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2041 { 2042 cancel_delayed_work(&hctx->run_work); 2043 2044 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2045 } 2046 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2047 2048 /* 2049 * This function is often used for pausing .queue_rq() by driver when 2050 * there isn't enough resource or some conditions aren't satisfied, and 2051 * BLK_STS_RESOURCE is usually returned. 2052 * 2053 * We do not guarantee that dispatch can be drained or blocked 2054 * after blk_mq_stop_hw_queues() returns. Please use 2055 * blk_mq_quiesce_queue() for that requirement. 2056 */ 2057 void blk_mq_stop_hw_queues(struct request_queue *q) 2058 { 2059 struct blk_mq_hw_ctx *hctx; 2060 int i; 2061 2062 queue_for_each_hw_ctx(q, hctx, i) 2063 blk_mq_stop_hw_queue(hctx); 2064 } 2065 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2066 2067 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2068 { 2069 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2070 2071 blk_mq_run_hw_queue(hctx, false); 2072 } 2073 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2074 2075 void blk_mq_start_hw_queues(struct request_queue *q) 2076 { 2077 struct blk_mq_hw_ctx *hctx; 2078 int i; 2079 2080 queue_for_each_hw_ctx(q, hctx, i) 2081 blk_mq_start_hw_queue(hctx); 2082 } 2083 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2084 2085 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2086 { 2087 if (!blk_mq_hctx_stopped(hctx)) 2088 return; 2089 2090 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2091 blk_mq_run_hw_queue(hctx, async); 2092 } 2093 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2094 2095 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2096 { 2097 struct blk_mq_hw_ctx *hctx; 2098 int i; 2099 2100 queue_for_each_hw_ctx(q, hctx, i) 2101 blk_mq_start_stopped_hw_queue(hctx, async); 2102 } 2103 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2104 2105 static void blk_mq_run_work_fn(struct work_struct *work) 2106 { 2107 struct blk_mq_hw_ctx *hctx; 2108 2109 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 2110 2111 /* 2112 * If we are stopped, don't run the queue. 
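 * The stopped state is set by blk_mq_stop_hw_queue(); a driver that pauses
 * dispatch this way is expected to restart it later, typically via
 * blk_mq_start_stopped_hw_queues(), which re-runs the queue for us.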
2113 */ 2114 if (blk_mq_hctx_stopped(hctx)) 2115 return; 2116 2117 __blk_mq_run_hw_queue(hctx); 2118 } 2119 2120 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 2121 struct request *rq, 2122 bool at_head) 2123 { 2124 struct blk_mq_ctx *ctx = rq->mq_ctx; 2125 enum hctx_type type = hctx->type; 2126 2127 lockdep_assert_held(&ctx->lock); 2128 2129 trace_block_rq_insert(rq); 2130 2131 if (at_head) 2132 list_add(&rq->queuelist, &ctx->rq_lists[type]); 2133 else 2134 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 2135 } 2136 2137 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 2138 bool at_head) 2139 { 2140 struct blk_mq_ctx *ctx = rq->mq_ctx; 2141 2142 lockdep_assert_held(&ctx->lock); 2143 2144 __blk_mq_insert_req_list(hctx, rq, at_head); 2145 blk_mq_hctx_mark_pending(hctx, ctx); 2146 } 2147 2148 /** 2149 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2150 * @rq: Pointer to request to be inserted. 2151 * @at_head: true if the request should be inserted at the head of the list. 2152 * @run_queue: If we should run the hardware queue after inserting the request. 2153 * 2154 * Should only be used carefully, when the caller knows we want to 2155 * bypass a potential IO scheduler on the target device. 2156 */ 2157 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, 2158 bool run_queue) 2159 { 2160 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2161 2162 spin_lock(&hctx->lock); 2163 if (at_head) 2164 list_add(&rq->queuelist, &hctx->dispatch); 2165 else 2166 list_add_tail(&rq->queuelist, &hctx->dispatch); 2167 spin_unlock(&hctx->lock); 2168 2169 if (run_queue) 2170 blk_mq_run_hw_queue(hctx, false); 2171 } 2172 2173 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 2174 struct list_head *list) 2175 2176 { 2177 struct request *rq; 2178 enum hctx_type type = hctx->type; 2179 2180 /* 2181 * preemption doesn't flush plug list, so it's possible ctx->cpu is 2182 * offline now 2183 */ 2184 list_for_each_entry(rq, list, queuelist) { 2185 BUG_ON(rq->mq_ctx != ctx); 2186 trace_block_rq_insert(rq); 2187 } 2188 2189 spin_lock(&ctx->lock); 2190 list_splice_tail_init(list, &ctx->rq_lists[type]); 2191 blk_mq_hctx_mark_pending(hctx, ctx); 2192 spin_unlock(&ctx->lock); 2193 } 2194 2195 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued, 2196 bool from_schedule) 2197 { 2198 if (hctx->queue->mq_ops->commit_rqs) { 2199 trace_block_unplug(hctx->queue, *queued, !from_schedule); 2200 hctx->queue->mq_ops->commit_rqs(hctx); 2201 } 2202 *queued = 0; 2203 } 2204 2205 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule) 2206 { 2207 struct blk_mq_hw_ctx *hctx = NULL; 2208 struct request *rq; 2209 int queued = 0; 2210 int errors = 0; 2211 2212 while ((rq = rq_list_pop(&plug->mq_list))) { 2213 bool last = rq_list_empty(plug->mq_list); 2214 blk_status_t ret; 2215 2216 if (hctx != rq->mq_hctx) { 2217 if (hctx) 2218 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2219 hctx = rq->mq_hctx; 2220 } 2221 2222 ret = blk_mq_request_issue_directly(rq, last); 2223 switch (ret) { 2224 case BLK_STS_OK: 2225 queued++; 2226 break; 2227 case BLK_STS_RESOURCE: 2228 case BLK_STS_DEV_RESOURCE: 2229 blk_mq_request_bypass_insert(rq, false, last); 2230 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2231 return; 2232 default: 2233 blk_mq_end_request(rq, ret); 2234 errors++; 2235 break; 2236 } 2237 } 2238 2239 /* 2240 * If we didn't flush the entire list, we could have told the driver 2241 * 
there was more coming, but that turned out to be a lie. 2242 */ 2243 if (errors) 2244 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2245 } 2246 2247 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2248 { 2249 struct blk_mq_hw_ctx *this_hctx; 2250 struct blk_mq_ctx *this_ctx; 2251 unsigned int depth; 2252 LIST_HEAD(list); 2253 2254 if (rq_list_empty(plug->mq_list)) 2255 return; 2256 plug->rq_count = 0; 2257 2258 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2259 blk_mq_plug_issue_direct(plug, false); 2260 if (rq_list_empty(plug->mq_list)) 2261 return; 2262 } 2263 2264 this_hctx = NULL; 2265 this_ctx = NULL; 2266 depth = 0; 2267 do { 2268 struct request *rq; 2269 2270 rq = rq_list_pop(&plug->mq_list); 2271 2272 if (!this_hctx) { 2273 this_hctx = rq->mq_hctx; 2274 this_ctx = rq->mq_ctx; 2275 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { 2276 trace_block_unplug(this_hctx->queue, depth, 2277 !from_schedule); 2278 blk_mq_sched_insert_requests(this_hctx, this_ctx, 2279 &list, from_schedule); 2280 depth = 0; 2281 this_hctx = rq->mq_hctx; 2282 this_ctx = rq->mq_ctx; 2283 2284 } 2285 2286 list_add(&rq->queuelist, &list); 2287 depth++; 2288 } while (!rq_list_empty(plug->mq_list)); 2289 2290 if (!list_empty(&list)) { 2291 trace_block_unplug(this_hctx->queue, depth, !from_schedule); 2292 blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, 2293 from_schedule); 2294 } 2295 } 2296 2297 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2298 unsigned int nr_segs) 2299 { 2300 int err; 2301 2302 if (bio->bi_opf & REQ_RAHEAD) 2303 rq->cmd_flags |= REQ_FAILFAST_MASK; 2304 2305 rq->__sector = bio->bi_iter.bi_sector; 2306 rq->write_hint = bio->bi_write_hint; 2307 blk_rq_bio_prep(rq, bio, nr_segs); 2308 2309 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2310 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2311 WARN_ON_ONCE(err); 2312 2313 blk_account_io_start(rq); 2314 } 2315 2316 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2317 struct request *rq, bool last) 2318 { 2319 struct request_queue *q = rq->q; 2320 struct blk_mq_queue_data bd = { 2321 .rq = rq, 2322 .last = last, 2323 }; 2324 blk_status_t ret; 2325 2326 /* 2327 * For OK queue, we are done. For error, caller may kill it. 2328 * Any other error (busy), just add it to our list as we 2329 * previously would have done. 2330 */ 2331 ret = q->mq_ops->queue_rq(hctx, &bd); 2332 switch (ret) { 2333 case BLK_STS_OK: 2334 blk_mq_update_dispatch_busy(hctx, false); 2335 break; 2336 case BLK_STS_RESOURCE: 2337 case BLK_STS_DEV_RESOURCE: 2338 blk_mq_update_dispatch_busy(hctx, true); 2339 __blk_mq_requeue_request(rq); 2340 break; 2341 default: 2342 blk_mq_update_dispatch_busy(hctx, false); 2343 break; 2344 } 2345 2346 return ret; 2347 } 2348 2349 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2350 struct request *rq, 2351 bool bypass_insert, bool last) 2352 { 2353 struct request_queue *q = rq->q; 2354 bool run_queue = true; 2355 int budget_token; 2356 2357 /* 2358 * RCU or SRCU read lock is needed before checking quiesced flag. 2359 * 2360 * When queue is stopped or quiesced, ignore 'bypass_insert' from 2361 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, 2362 * and avoid driver to try to dispatch again. 
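 *
 * Both callers provide that protection via hctx_lock(), e.g.:
 *
 *	hctx_lock(hctx, &srcu_idx);
 *	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
 *	hctx_unlock(hctx, srcu_idx);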
2363 */ 2364 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 2365 run_queue = false; 2366 bypass_insert = false; 2367 goto insert; 2368 } 2369 2370 if ((rq->rq_flags & RQF_ELV) && !bypass_insert) 2371 goto insert; 2372 2373 budget_token = blk_mq_get_dispatch_budget(q); 2374 if (budget_token < 0) 2375 goto insert; 2376 2377 blk_mq_set_rq_budget_token(rq, budget_token); 2378 2379 if (!blk_mq_get_driver_tag(rq)) { 2380 blk_mq_put_dispatch_budget(q, budget_token); 2381 goto insert; 2382 } 2383 2384 return __blk_mq_issue_directly(hctx, rq, last); 2385 insert: 2386 if (bypass_insert) 2387 return BLK_STS_RESOURCE; 2388 2389 blk_mq_sched_insert_request(rq, false, run_queue, false); 2390 2391 return BLK_STS_OK; 2392 } 2393 2394 /** 2395 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2396 * @hctx: Pointer of the associated hardware queue. 2397 * @rq: Pointer to request to be sent. 2398 * 2399 * If the device has enough resources to accept a new request now, send the 2400 * request directly to device driver. Else, insert at hctx->dispatch queue, so 2401 * we can try send it another time in the future. Requests inserted at this 2402 * queue have higher priority. 2403 */ 2404 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2405 struct request *rq) 2406 { 2407 blk_status_t ret; 2408 int srcu_idx; 2409 2410 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 2411 2412 hctx_lock(hctx, &srcu_idx); 2413 2414 ret = __blk_mq_try_issue_directly(hctx, rq, false, true); 2415 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2416 blk_mq_request_bypass_insert(rq, false, true); 2417 else if (ret != BLK_STS_OK) 2418 blk_mq_end_request(rq, ret); 2419 2420 hctx_unlock(hctx, srcu_idx); 2421 } 2422 2423 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2424 { 2425 blk_status_t ret; 2426 int srcu_idx; 2427 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2428 2429 hctx_lock(hctx, &srcu_idx); 2430 ret = __blk_mq_try_issue_directly(hctx, rq, true, last); 2431 hctx_unlock(hctx, srcu_idx); 2432 2433 return ret; 2434 } 2435 2436 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2437 struct list_head *list) 2438 { 2439 int queued = 0; 2440 int errors = 0; 2441 2442 while (!list_empty(list)) { 2443 blk_status_t ret; 2444 struct request *rq = list_first_entry(list, struct request, 2445 queuelist); 2446 2447 list_del_init(&rq->queuelist); 2448 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2449 if (ret != BLK_STS_OK) { 2450 if (ret == BLK_STS_RESOURCE || 2451 ret == BLK_STS_DEV_RESOURCE) { 2452 blk_mq_request_bypass_insert(rq, false, 2453 list_empty(list)); 2454 break; 2455 } 2456 blk_mq_end_request(rq, ret); 2457 errors++; 2458 } else 2459 queued++; 2460 } 2461 2462 /* 2463 * If we didn't flush the entire list, we could have told 2464 * the driver there was more coming, but that turned out to 2465 * be a lie. 
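 * Calling ->commit_rqs() lets a driver that batches on bd->last kick the
 * hardware anyway; a driver-side implementation might be as simple as
 * (hypothetical sketch, "mydrv" names are illustrative):
 *
 *	static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)
 *	{
 *		mydrv_ring_doorbell(hctx->driver_data);
 *	}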
2466 */ 2467 if ((!list_empty(list) || errors) && 2468 hctx->queue->mq_ops->commit_rqs && queued) 2469 hctx->queue->mq_ops->commit_rqs(hctx); 2470 } 2471 2472 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) 2473 { 2474 if (!plug->multiple_queues) { 2475 struct request *nxt = rq_list_peek(&plug->mq_list); 2476 2477 if (nxt && nxt->q != rq->q) 2478 plug->multiple_queues = true; 2479 } 2480 if (!plug->has_elevator && (rq->rq_flags & RQF_ELV)) 2481 plug->has_elevator = true; 2482 rq->rq_next = NULL; 2483 rq_list_add(&plug->mq_list, rq); 2484 plug->rq_count++; 2485 } 2486 2487 /* 2488 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple 2489 * queues. This is important for md arrays to benefit from merging 2490 * requests. 2491 */ 2492 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) 2493 { 2494 if (plug->multiple_queues) 2495 return BLK_MAX_REQUEST_COUNT * 2; 2496 return BLK_MAX_REQUEST_COUNT; 2497 } 2498 2499 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2500 struct bio *bio, unsigned int nr_segs, 2501 bool *same_queue_rq) 2502 { 2503 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2504 if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq)) 2505 return true; 2506 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2507 return true; 2508 } 2509 return false; 2510 } 2511 2512 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2513 struct blk_plug *plug, 2514 struct bio *bio, 2515 unsigned int nsegs, 2516 bool *same_queue_rq) 2517 { 2518 struct blk_mq_alloc_data data = { 2519 .q = q, 2520 .nr_tags = 1, 2521 .cmd_flags = bio->bi_opf, 2522 }; 2523 struct request *rq; 2524 2525 if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq)) 2526 return NULL; 2527 2528 rq_qos_throttle(q, bio); 2529 2530 if (plug) { 2531 data.nr_tags = plug->nr_ios; 2532 plug->nr_ios = 1; 2533 data.cached_rq = &plug->cached_rq; 2534 } 2535 2536 rq = __blk_mq_alloc_requests(&data); 2537 if (rq) 2538 return rq; 2539 2540 rq_qos_cleanup(q, bio); 2541 if (bio->bi_opf & REQ_NOWAIT) 2542 bio_wouldblock_error(bio); 2543 2544 return NULL; 2545 } 2546 2547 static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio) 2548 { 2549 if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type) 2550 return false; 2551 2552 if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf)) 2553 return false; 2554 2555 return true; 2556 } 2557 2558 static inline struct request *blk_mq_get_request(struct request_queue *q, 2559 struct blk_plug *plug, 2560 struct bio *bio, 2561 unsigned int nsegs, 2562 bool *same_queue_rq) 2563 { 2564 struct request *rq; 2565 bool checked = false; 2566 2567 if (plug) { 2568 rq = rq_list_peek(&plug->cached_rq); 2569 if (rq && rq->q == q) { 2570 if (unlikely(!submit_bio_checks(bio))) 2571 return NULL; 2572 if (blk_mq_attempt_bio_merge(q, bio, nsegs, 2573 same_queue_rq)) 2574 return NULL; 2575 checked = true; 2576 if (!blk_mq_can_use_cached_rq(rq, bio)) 2577 goto fallback; 2578 rq->cmd_flags = bio->bi_opf; 2579 plug->cached_rq = rq_list_next(rq); 2580 INIT_LIST_HEAD(&rq->queuelist); 2581 rq_qos_throttle(q, bio); 2582 return rq; 2583 } 2584 } 2585 2586 fallback: 2587 if (unlikely(bio_queue_enter(bio))) 2588 return NULL; 2589 if (unlikely(!checked && !submit_bio_checks(bio))) 2590 goto out_put; 2591 rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq); 2592 if (rq) 2593 return rq; 2594 out_put: 2595 blk_queue_exit(q); 2596 return NULL; 2597 } 2598 2599 /** 2600 * blk_mq_submit_bio - 
Create and send a request to block device. 2601 * @bio: Bio pointer. 2602 * 2603 * Builds up a request structure from @q and @bio and send to the device. The 2604 * request may not be queued directly to hardware if: 2605 * * This request can be merged with another one 2606 * * We want to place request at plug queue for possible future merging 2607 * * There is an IO scheduler active at this queue 2608 * 2609 * It will not queue the request if there is an error with the bio, or at the 2610 * request creation. 2611 */ 2612 void blk_mq_submit_bio(struct bio *bio) 2613 { 2614 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2615 const int is_sync = op_is_sync(bio->bi_opf); 2616 struct request *rq; 2617 struct blk_plug *plug; 2618 bool same_queue_rq = false; 2619 unsigned int nr_segs = 1; 2620 blk_status_t ret; 2621 2622 if (unlikely(!blk_crypto_bio_prep(&bio))) 2623 return; 2624 2625 blk_queue_bounce(q, &bio); 2626 if (blk_may_split(q, bio)) 2627 __blk_queue_split(q, &bio, &nr_segs); 2628 2629 if (!bio_integrity_prep(bio)) 2630 return; 2631 2632 plug = blk_mq_plug(q, bio); 2633 rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq); 2634 if (unlikely(!rq)) 2635 return; 2636 2637 trace_block_getrq(bio); 2638 2639 rq_qos_track(q, rq, bio); 2640 2641 blk_mq_bio_to_request(rq, bio, nr_segs); 2642 2643 ret = blk_crypto_init_request(rq); 2644 if (ret != BLK_STS_OK) { 2645 bio->bi_status = ret; 2646 bio_endio(bio); 2647 blk_mq_free_request(rq); 2648 return; 2649 } 2650 2651 if (op_is_flush(bio->bi_opf)) { 2652 blk_insert_flush(rq); 2653 return; 2654 } 2655 2656 if (plug && (q->nr_hw_queues == 1 || 2657 blk_mq_is_shared_tags(rq->mq_hctx->flags) || 2658 q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { 2659 /* 2660 * Use plugging if we have a ->commit_rqs() hook as well, as 2661 * we know the driver uses bd->last in a smart fashion. 2662 * 2663 * Use normal plugging if this disk is slow HDD, as sequential 2664 * IO may benefit a lot from plug merging. 2665 */ 2666 unsigned int request_count = plug->rq_count; 2667 struct request *last = NULL; 2668 2669 if (!request_count) { 2670 trace_block_plug(q); 2671 } else if (!blk_queue_nomerges(q)) { 2672 last = rq_list_peek(&plug->mq_list); 2673 if (blk_rq_bytes(last) < BLK_PLUG_FLUSH_SIZE) 2674 last = NULL; 2675 } 2676 2677 if (request_count >= blk_plug_max_rq_count(plug) || last) { 2678 blk_mq_flush_plug_list(plug, false); 2679 trace_block_plug(q); 2680 } 2681 2682 blk_add_rq_to_plug(plug, rq); 2683 } else if (rq->rq_flags & RQF_ELV) { 2684 /* Insert the request at the IO scheduler queue */ 2685 blk_mq_sched_insert_request(rq, false, true, true); 2686 } else if (plug && !blk_queue_nomerges(q)) { 2687 struct request *next_rq = NULL; 2688 2689 /* 2690 * We do limited plugging. If the bio can be merged, do that. 2691 * Otherwise the existing request in the plug list will be 2692 * issued. So the plug list will have one request at most 2693 * The plug list might get flushed before this. If that happens, 2694 * the plug list is empty, and same_queue_rq is invalid. 2695 */ 2696 if (same_queue_rq) { 2697 next_rq = rq_list_pop(&plug->mq_list); 2698 plug->rq_count--; 2699 } 2700 blk_add_rq_to_plug(plug, rq); 2701 trace_block_plug(q); 2702 2703 if (next_rq) { 2704 trace_block_unplug(q, 1, true); 2705 blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq); 2706 } 2707 } else if ((q->nr_hw_queues > 1 && is_sync) || 2708 !rq->mq_hctx->dispatch_busy) { 2709 /* 2710 * There is no scheduler and we can try to send directly 2711 * to the hardware. 
2712 */ 2713 blk_mq_try_issue_directly(rq->mq_hctx, rq); 2714 } else { 2715 /* Default case. */ 2716 blk_mq_sched_insert_request(rq, false, true, true); 2717 } 2718 } 2719 2720 static size_t order_to_size(unsigned int order) 2721 { 2722 return (size_t)PAGE_SIZE << order; 2723 } 2724 2725 /* called before freeing request pool in @tags */ 2726 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 2727 struct blk_mq_tags *tags) 2728 { 2729 struct page *page; 2730 unsigned long flags; 2731 2732 /* There is no need to clear a driver tags own mapping */ 2733 if (drv_tags == tags) 2734 return; 2735 2736 list_for_each_entry(page, &tags->page_list, lru) { 2737 unsigned long start = (unsigned long)page_address(page); 2738 unsigned long end = start + order_to_size(page->private); 2739 int i; 2740 2741 for (i = 0; i < drv_tags->nr_tags; i++) { 2742 struct request *rq = drv_tags->rqs[i]; 2743 unsigned long rq_addr = (unsigned long)rq; 2744 2745 if (rq_addr >= start && rq_addr < end) { 2746 WARN_ON_ONCE(refcount_read(&rq->ref) != 0); 2747 cmpxchg(&drv_tags->rqs[i], rq, NULL); 2748 } 2749 } 2750 } 2751 2752 /* 2753 * Wait until all pending iteration is done. 2754 * 2755 * Request reference is cleared and it is guaranteed to be observed 2756 * after the ->lock is released. 2757 */ 2758 spin_lock_irqsave(&drv_tags->lock, flags); 2759 spin_unlock_irqrestore(&drv_tags->lock, flags); 2760 } 2761 2762 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 2763 unsigned int hctx_idx) 2764 { 2765 struct blk_mq_tags *drv_tags; 2766 struct page *page; 2767 2768 if (blk_mq_is_shared_tags(set->flags)) 2769 drv_tags = set->shared_tags; 2770 else 2771 drv_tags = set->tags[hctx_idx]; 2772 2773 if (tags->static_rqs && set->ops->exit_request) { 2774 int i; 2775 2776 for (i = 0; i < tags->nr_tags; i++) { 2777 struct request *rq = tags->static_rqs[i]; 2778 2779 if (!rq) 2780 continue; 2781 set->ops->exit_request(set, rq, hctx_idx); 2782 tags->static_rqs[i] = NULL; 2783 } 2784 } 2785 2786 blk_mq_clear_rq_mapping(drv_tags, tags); 2787 2788 while (!list_empty(&tags->page_list)) { 2789 page = list_first_entry(&tags->page_list, struct page, lru); 2790 list_del_init(&page->lru); 2791 /* 2792 * Remove kmemleak object previously allocated in 2793 * blk_mq_alloc_rqs(). 
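 * This pairs with the kmemleak_alloc() call made there for each page:
 *
 *	kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);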
2794 */ 2795 kmemleak_free(page_address(page)); 2796 __free_pages(page, page->private); 2797 } 2798 } 2799 2800 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 2801 { 2802 kfree(tags->rqs); 2803 tags->rqs = NULL; 2804 kfree(tags->static_rqs); 2805 tags->static_rqs = NULL; 2806 2807 blk_mq_free_tags(tags); 2808 } 2809 2810 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 2811 unsigned int hctx_idx, 2812 unsigned int nr_tags, 2813 unsigned int reserved_tags) 2814 { 2815 struct blk_mq_tags *tags; 2816 int node; 2817 2818 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2819 if (node == NUMA_NO_NODE) 2820 node = set->numa_node; 2821 2822 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 2823 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 2824 if (!tags) 2825 return NULL; 2826 2827 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2828 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2829 node); 2830 if (!tags->rqs) { 2831 blk_mq_free_tags(tags); 2832 return NULL; 2833 } 2834 2835 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 2836 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 2837 node); 2838 if (!tags->static_rqs) { 2839 kfree(tags->rqs); 2840 blk_mq_free_tags(tags); 2841 return NULL; 2842 } 2843 2844 return tags; 2845 } 2846 2847 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 2848 unsigned int hctx_idx, int node) 2849 { 2850 int ret; 2851 2852 if (set->ops->init_request) { 2853 ret = set->ops->init_request(set, rq, hctx_idx, node); 2854 if (ret) 2855 return ret; 2856 } 2857 2858 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 2859 return 0; 2860 } 2861 2862 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 2863 struct blk_mq_tags *tags, 2864 unsigned int hctx_idx, unsigned int depth) 2865 { 2866 unsigned int i, j, entries_per_page, max_order = 4; 2867 size_t rq_size, left; 2868 int node; 2869 2870 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); 2871 if (node == NUMA_NO_NODE) 2872 node = set->numa_node; 2873 2874 INIT_LIST_HEAD(&tags->page_list); 2875 2876 /* 2877 * rq_size is the size of the request plus driver payload, rounded 2878 * to the cacheline size 2879 */ 2880 rq_size = round_up(sizeof(struct request) + set->cmd_size, 2881 cache_line_size()); 2882 left = rq_size * depth; 2883 2884 for (i = 0; i < depth; ) { 2885 int this_order = max_order; 2886 struct page *page; 2887 int to_do; 2888 void *p; 2889 2890 while (this_order && left < order_to_size(this_order - 1)) 2891 this_order--; 2892 2893 do { 2894 page = alloc_pages_node(node, 2895 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 2896 this_order); 2897 if (page) 2898 break; 2899 if (!this_order--) 2900 break; 2901 if (order_to_size(this_order) < rq_size) 2902 break; 2903 } while (1); 2904 2905 if (!page) 2906 goto fail; 2907 2908 page->private = this_order; 2909 list_add_tail(&page->lru, &tags->page_list); 2910 2911 p = page_address(page); 2912 /* 2913 * Allow kmemleak to scan these pages as they contain pointers 2914 * to additional allocations like via ops->init_request(). 
2915 */ 2916 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 2917 entries_per_page = order_to_size(this_order) / rq_size; 2918 to_do = min(entries_per_page, depth - i); 2919 left -= to_do * rq_size; 2920 for (j = 0; j < to_do; j++) { 2921 struct request *rq = p; 2922 2923 tags->static_rqs[i] = rq; 2924 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 2925 tags->static_rqs[i] = NULL; 2926 goto fail; 2927 } 2928 2929 p += rq_size; 2930 i++; 2931 } 2932 } 2933 return 0; 2934 2935 fail: 2936 blk_mq_free_rqs(set, tags, hctx_idx); 2937 return -ENOMEM; 2938 } 2939 2940 struct rq_iter_data { 2941 struct blk_mq_hw_ctx *hctx; 2942 bool has_rq; 2943 }; 2944 2945 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) 2946 { 2947 struct rq_iter_data *iter_data = data; 2948 2949 if (rq->mq_hctx != iter_data->hctx) 2950 return true; 2951 iter_data->has_rq = true; 2952 return false; 2953 } 2954 2955 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 2956 { 2957 struct blk_mq_tags *tags = hctx->sched_tags ? 2958 hctx->sched_tags : hctx->tags; 2959 struct rq_iter_data data = { 2960 .hctx = hctx, 2961 }; 2962 2963 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 2964 return data.has_rq; 2965 } 2966 2967 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 2968 struct blk_mq_hw_ctx *hctx) 2969 { 2970 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) 2971 return false; 2972 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 2973 return false; 2974 return true; 2975 } 2976 2977 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 2978 { 2979 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 2980 struct blk_mq_hw_ctx, cpuhp_online); 2981 2982 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 2983 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 2984 return 0; 2985 2986 /* 2987 * Prevent new request from being allocated on the current hctx. 2988 * 2989 * The smp_mb__after_atomic() Pairs with the implied barrier in 2990 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 2991 * seen once we return from the tag allocator. 2992 */ 2993 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 2994 smp_mb__after_atomic(); 2995 2996 /* 2997 * Try to grab a reference to the queue and wait for any outstanding 2998 * requests. If we could not grab a reference the queue has been 2999 * frozen and there are no requests. 3000 */ 3001 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3002 while (blk_mq_hctx_has_requests(hctx)) 3003 msleep(5); 3004 percpu_ref_put(&hctx->queue->q_usage_counter); 3005 } 3006 3007 return 0; 3008 } 3009 3010 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3011 { 3012 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3013 struct blk_mq_hw_ctx, cpuhp_online); 3014 3015 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3016 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3017 return 0; 3018 } 3019 3020 /* 3021 * 'cpu' is going away. splice any existing rq_list entries from this 3022 * software queue to the hw queue dispatch list, and ensure that it 3023 * gets run. 
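 *
 * This is the CPUHP_BLK_MQ_DEAD callback; it is registered per hctx via
 * cpuhp_state_add_instance_nocalls() in blk_mq_init_hctx() and removed
 * again in blk_mq_remove_cpuhp().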
3024 */ 3025 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3026 { 3027 struct blk_mq_hw_ctx *hctx; 3028 struct blk_mq_ctx *ctx; 3029 LIST_HEAD(tmp); 3030 enum hctx_type type; 3031 3032 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3033 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 3034 return 0; 3035 3036 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3037 type = hctx->type; 3038 3039 spin_lock(&ctx->lock); 3040 if (!list_empty(&ctx->rq_lists[type])) { 3041 list_splice_init(&ctx->rq_lists[type], &tmp); 3042 blk_mq_hctx_clear_pending(hctx, ctx); 3043 } 3044 spin_unlock(&ctx->lock); 3045 3046 if (list_empty(&tmp)) 3047 return 0; 3048 3049 spin_lock(&hctx->lock); 3050 list_splice_tail_init(&tmp, &hctx->dispatch); 3051 spin_unlock(&hctx->lock); 3052 3053 blk_mq_run_hw_queue(hctx, true); 3054 return 0; 3055 } 3056 3057 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3058 { 3059 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3060 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3061 &hctx->cpuhp_online); 3062 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3063 &hctx->cpuhp_dead); 3064 } 3065 3066 /* 3067 * Before freeing the hw queue, clear the flush request reference in 3068 * tags->rqs[] to avoid a potential use-after-free. 3069 */ 3070 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3071 unsigned int queue_depth, struct request *flush_rq) 3072 { 3073 int i; 3074 unsigned long flags; 3075 3076 /* The hw queue may not be mapped yet */ 3077 if (!tags) 3078 return; 3079 3080 WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0); 3081 3082 for (i = 0; i < queue_depth; i++) 3083 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3084 3085 /* 3086 * Wait until all pending iterations are done. 3087 * 3088 * The request reference is cleared and it is guaranteed to be observed 3089 * after the ->lock is released.
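 *
 * The empty lock/unlock pair below is intentional: taking and releasing
 * tags->lock simply waits out any tag iteration still running under that
 * lock before the flush request is freed.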
3090 */ 3091 spin_lock_irqsave(&tags->lock, flags); 3092 spin_unlock_irqrestore(&tags->lock, flags); 3093 } 3094 3095 /* hctx->ctxs will be freed in queue's release handler */ 3096 static void blk_mq_exit_hctx(struct request_queue *q, 3097 struct blk_mq_tag_set *set, 3098 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3099 { 3100 struct request *flush_rq = hctx->fq->flush_rq; 3101 3102 if (blk_mq_hw_queue_mapped(hctx)) 3103 blk_mq_tag_idle(hctx); 3104 3105 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3106 set->queue_depth, flush_rq); 3107 if (set->ops->exit_request) 3108 set->ops->exit_request(set, flush_rq, hctx_idx); 3109 3110 if (set->ops->exit_hctx) 3111 set->ops->exit_hctx(hctx, hctx_idx); 3112 3113 blk_mq_remove_cpuhp(hctx); 3114 3115 spin_lock(&q->unused_hctx_lock); 3116 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3117 spin_unlock(&q->unused_hctx_lock); 3118 } 3119 3120 static void blk_mq_exit_hw_queues(struct request_queue *q, 3121 struct blk_mq_tag_set *set, int nr_queue) 3122 { 3123 struct blk_mq_hw_ctx *hctx; 3124 unsigned int i; 3125 3126 queue_for_each_hw_ctx(q, hctx, i) { 3127 if (i == nr_queue) 3128 break; 3129 blk_mq_debugfs_unregister_hctx(hctx); 3130 blk_mq_exit_hctx(q, set, hctx, i); 3131 } 3132 } 3133 3134 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set) 3135 { 3136 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx); 3137 3138 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu), 3139 __alignof__(struct blk_mq_hw_ctx)) != 3140 sizeof(struct blk_mq_hw_ctx)); 3141 3142 if (tag_set->flags & BLK_MQ_F_BLOCKING) 3143 hw_ctx_size += sizeof(struct srcu_struct); 3144 3145 return hw_ctx_size; 3146 } 3147 3148 static int blk_mq_init_hctx(struct request_queue *q, 3149 struct blk_mq_tag_set *set, 3150 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3151 { 3152 hctx->queue_num = hctx_idx; 3153 3154 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3155 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3156 &hctx->cpuhp_online); 3157 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3158 3159 hctx->tags = set->tags[hctx_idx]; 3160 3161 if (set->ops->init_hctx && 3162 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3163 goto unregister_cpu_notifier; 3164 3165 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3166 hctx->numa_node)) 3167 goto exit_hctx; 3168 return 0; 3169 3170 exit_hctx: 3171 if (set->ops->exit_hctx) 3172 set->ops->exit_hctx(hctx, hctx_idx); 3173 unregister_cpu_notifier: 3174 blk_mq_remove_cpuhp(hctx); 3175 return -1; 3176 } 3177 3178 static struct blk_mq_hw_ctx * 3179 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3180 int node) 3181 { 3182 struct blk_mq_hw_ctx *hctx; 3183 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3184 3185 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); 3186 if (!hctx) 3187 goto fail_alloc_hctx; 3188 3189 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3190 goto free_hctx; 3191 3192 atomic_set(&hctx->nr_active, 0); 3193 if (node == NUMA_NO_NODE) 3194 node = set->numa_node; 3195 hctx->numa_node = node; 3196 3197 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3198 spin_lock_init(&hctx->lock); 3199 INIT_LIST_HEAD(&hctx->dispatch); 3200 hctx->queue = q; 3201 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3202 3203 INIT_LIST_HEAD(&hctx->hctx_list); 3204 3205 /* 3206 * Allocate space for all possible cpus to avoid allocation at 3207 * runtime 3208 */ 3209 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3210 gfp, 
node); 3211 if (!hctx->ctxs) 3212 goto free_cpumask; 3213 3214 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3215 gfp, node, false, false)) 3216 goto free_ctxs; 3217 hctx->nr_ctx = 0; 3218 3219 spin_lock_init(&hctx->dispatch_wait_lock); 3220 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3221 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3222 3223 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3224 if (!hctx->fq) 3225 goto free_bitmap; 3226 3227 if (hctx->flags & BLK_MQ_F_BLOCKING) 3228 init_srcu_struct(hctx->srcu); 3229 blk_mq_hctx_kobj_init(hctx); 3230 3231 return hctx; 3232 3233 free_bitmap: 3234 sbitmap_free(&hctx->ctx_map); 3235 free_ctxs: 3236 kfree(hctx->ctxs); 3237 free_cpumask: 3238 free_cpumask_var(hctx->cpumask); 3239 free_hctx: 3240 kfree(hctx); 3241 fail_alloc_hctx: 3242 return NULL; 3243 } 3244 3245 static void blk_mq_init_cpu_queues(struct request_queue *q, 3246 unsigned int nr_hw_queues) 3247 { 3248 struct blk_mq_tag_set *set = q->tag_set; 3249 unsigned int i, j; 3250 3251 for_each_possible_cpu(i) { 3252 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3253 struct blk_mq_hw_ctx *hctx; 3254 int k; 3255 3256 __ctx->cpu = i; 3257 spin_lock_init(&__ctx->lock); 3258 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3259 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3260 3261 __ctx->queue = q; 3262 3263 /* 3264 * Set local node, IFF we have more than one hw queue. If 3265 * not, we remain on the home node of the device 3266 */ 3267 for (j = 0; j < set->nr_maps; j++) { 3268 hctx = blk_mq_map_queue_type(q, j, i); 3269 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3270 hctx->numa_node = cpu_to_node(i); 3271 } 3272 } 3273 } 3274 3275 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3276 unsigned int hctx_idx, 3277 unsigned int depth) 3278 { 3279 struct blk_mq_tags *tags; 3280 int ret; 3281 3282 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3283 if (!tags) 3284 return NULL; 3285 3286 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3287 if (ret) { 3288 blk_mq_free_rq_map(tags); 3289 return NULL; 3290 } 3291 3292 return tags; 3293 } 3294 3295 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3296 int hctx_idx) 3297 { 3298 if (blk_mq_is_shared_tags(set->flags)) { 3299 set->tags[hctx_idx] = set->shared_tags; 3300 3301 return true; 3302 } 3303 3304 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3305 set->queue_depth); 3306 3307 return set->tags[hctx_idx]; 3308 } 3309 3310 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3311 struct blk_mq_tags *tags, 3312 unsigned int hctx_idx) 3313 { 3314 if (tags) { 3315 blk_mq_free_rqs(set, tags, hctx_idx); 3316 blk_mq_free_rq_map(tags); 3317 } 3318 } 3319 3320 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3321 unsigned int hctx_idx) 3322 { 3323 if (!blk_mq_is_shared_tags(set->flags)) 3324 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3325 3326 set->tags[hctx_idx] = NULL; 3327 } 3328 3329 static void blk_mq_map_swqueue(struct request_queue *q) 3330 { 3331 unsigned int i, j, hctx_idx; 3332 struct blk_mq_hw_ctx *hctx; 3333 struct blk_mq_ctx *ctx; 3334 struct blk_mq_tag_set *set = q->tag_set; 3335 3336 queue_for_each_hw_ctx(q, hctx, i) { 3337 cpumask_clear(hctx->cpumask); 3338 hctx->nr_ctx = 0; 3339 hctx->dispatch_from = NULL; 3340 } 3341 3342 /* 3343 * Map software to hardware queues. 3344 * 3345 * If the cpu isn't present, the cpu is mapped to first hctx. 
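 *
 * The index stored in ctx->index_hw[] below is the bit that later gets
 * set in hctx->ctx_map when requests are queued on that software queue.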
3346 */ 3347 for_each_possible_cpu(i) { 3348 3349 ctx = per_cpu_ptr(q->queue_ctx, i); 3350 for (j = 0; j < set->nr_maps; j++) { 3351 if (!set->map[j].nr_queues) { 3352 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3353 HCTX_TYPE_DEFAULT, i); 3354 continue; 3355 } 3356 hctx_idx = set->map[j].mq_map[i]; 3357 /* unmapped hw queue can be remapped after CPU topo changed */ 3358 if (!set->tags[hctx_idx] && 3359 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 3360 /* 3361 * If tags initialization fail for some hctx, 3362 * that hctx won't be brought online. In this 3363 * case, remap the current ctx to hctx[0] which 3364 * is guaranteed to always have tags allocated 3365 */ 3366 set->map[j].mq_map[i] = 0; 3367 } 3368 3369 hctx = blk_mq_map_queue_type(q, j, i); 3370 ctx->hctxs[j] = hctx; 3371 /* 3372 * If the CPU is already set in the mask, then we've 3373 * mapped this one already. This can happen if 3374 * devices share queues across queue maps. 3375 */ 3376 if (cpumask_test_cpu(i, hctx->cpumask)) 3377 continue; 3378 3379 cpumask_set_cpu(i, hctx->cpumask); 3380 hctx->type = j; 3381 ctx->index_hw[hctx->type] = hctx->nr_ctx; 3382 hctx->ctxs[hctx->nr_ctx++] = ctx; 3383 3384 /* 3385 * If the nr_ctx type overflows, we have exceeded the 3386 * amount of sw queues we can support. 3387 */ 3388 BUG_ON(!hctx->nr_ctx); 3389 } 3390 3391 for (; j < HCTX_MAX_TYPES; j++) 3392 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3393 HCTX_TYPE_DEFAULT, i); 3394 } 3395 3396 queue_for_each_hw_ctx(q, hctx, i) { 3397 /* 3398 * If no software queues are mapped to this hardware queue, 3399 * disable it and free the request entries. 3400 */ 3401 if (!hctx->nr_ctx) { 3402 /* Never unmap queue 0. We need it as a 3403 * fallback in case of a new remap fails 3404 * allocation 3405 */ 3406 if (i) 3407 __blk_mq_free_map_and_rqs(set, i); 3408 3409 hctx->tags = NULL; 3410 continue; 3411 } 3412 3413 hctx->tags = set->tags[i]; 3414 WARN_ON(!hctx->tags); 3415 3416 /* 3417 * Set the map size to the number of mapped software queues. 3418 * This is more accurate and more efficient than looping 3419 * over all possibly mapped software queues. 3420 */ 3421 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 3422 3423 /* 3424 * Initialize batch roundrobin counts 3425 */ 3426 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 3427 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 3428 } 3429 } 3430 3431 /* 3432 * Caller needs to ensure that we're either frozen/quiesced, or that 3433 * the queue isn't live yet. 
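 *
 * blk_mq_update_tag_set_shared() below satisfies that by freezing each
 * queue around the update:
 *
 *	blk_mq_freeze_queue(q);
 *	queue_set_hctx_shared(q, shared);
 *	blk_mq_unfreeze_queue(q);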
3434 */ 3435 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3436 { 3437 struct blk_mq_hw_ctx *hctx; 3438 int i; 3439 3440 queue_for_each_hw_ctx(q, hctx, i) { 3441 if (shared) { 3442 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3443 } else { 3444 blk_mq_tag_idle(hctx); 3445 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3446 } 3447 } 3448 } 3449 3450 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3451 bool shared) 3452 { 3453 struct request_queue *q; 3454 3455 lockdep_assert_held(&set->tag_list_lock); 3456 3457 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3458 blk_mq_freeze_queue(q); 3459 queue_set_hctx_shared(q, shared); 3460 blk_mq_unfreeze_queue(q); 3461 } 3462 } 3463 3464 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3465 { 3466 struct blk_mq_tag_set *set = q->tag_set; 3467 3468 mutex_lock(&set->tag_list_lock); 3469 list_del(&q->tag_set_list); 3470 if (list_is_singular(&set->tag_list)) { 3471 /* just transitioned to unshared */ 3472 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3473 /* update existing queue */ 3474 blk_mq_update_tag_set_shared(set, false); 3475 } 3476 mutex_unlock(&set->tag_list_lock); 3477 INIT_LIST_HEAD(&q->tag_set_list); 3478 } 3479 3480 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3481 struct request_queue *q) 3482 { 3483 mutex_lock(&set->tag_list_lock); 3484 3485 /* 3486 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3487 */ 3488 if (!list_empty(&set->tag_list) && 3489 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3490 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3491 /* update existing queue */ 3492 blk_mq_update_tag_set_shared(set, true); 3493 } 3494 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3495 queue_set_hctx_shared(q, true); 3496 list_add_tail(&q->tag_set_list, &set->tag_list); 3497 3498 mutex_unlock(&set->tag_list_lock); 3499 } 3500 3501 /* All allocations will be freed in release handler of q->mq_kobj */ 3502 static int blk_mq_alloc_ctxs(struct request_queue *q) 3503 { 3504 struct blk_mq_ctxs *ctxs; 3505 int cpu; 3506 3507 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 3508 if (!ctxs) 3509 return -ENOMEM; 3510 3511 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 3512 if (!ctxs->queue_ctx) 3513 goto fail; 3514 3515 for_each_possible_cpu(cpu) { 3516 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 3517 ctx->ctxs = ctxs; 3518 } 3519 3520 q->mq_kobj = &ctxs->kobj; 3521 q->queue_ctx = ctxs->queue_ctx; 3522 3523 return 0; 3524 fail: 3525 kfree(ctxs); 3526 return -ENOMEM; 3527 } 3528 3529 /* 3530 * It is the actual release handler for mq, but we do it from 3531 * request queue's release handler for avoiding use-after-free 3532 * and headache because q->mq_kobj shouldn't have been introduced, 3533 * but we can't group ctx/kctx kobj without it. 3534 */ 3535 void blk_mq_release(struct request_queue *q) 3536 { 3537 struct blk_mq_hw_ctx *hctx, *next; 3538 int i; 3539 3540 queue_for_each_hw_ctx(q, hctx, i) 3541 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 3542 3543 /* all hctx are in .unused_hctx_list now */ 3544 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 3545 list_del_init(&hctx->hctx_list); 3546 kobject_put(&hctx->kobj); 3547 } 3548 3549 kfree(q->queue_hw_ctx); 3550 3551 /* 3552 * release .mq_kobj and sw queue's kobject now because 3553 * both share lifetime with request queue. 
3554 */ 3555 blk_mq_sysfs_deinit(q); 3556 } 3557 3558 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 3559 void *queuedata) 3560 { 3561 struct request_queue *q; 3562 int ret; 3563 3564 q = blk_alloc_queue(set->numa_node); 3565 if (!q) 3566 return ERR_PTR(-ENOMEM); 3567 q->queuedata = queuedata; 3568 ret = blk_mq_init_allocated_queue(set, q); 3569 if (ret) { 3570 blk_cleanup_queue(q); 3571 return ERR_PTR(ret); 3572 } 3573 return q; 3574 } 3575 3576 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3577 { 3578 return blk_mq_init_queue_data(set, NULL); 3579 } 3580 EXPORT_SYMBOL(blk_mq_init_queue); 3581 3582 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, 3583 struct lock_class_key *lkclass) 3584 { 3585 struct request_queue *q; 3586 struct gendisk *disk; 3587 3588 q = blk_mq_init_queue_data(set, queuedata); 3589 if (IS_ERR(q)) 3590 return ERR_CAST(q); 3591 3592 disk = __alloc_disk_node(q, set->numa_node, lkclass); 3593 if (!disk) { 3594 blk_cleanup_queue(q); 3595 return ERR_PTR(-ENOMEM); 3596 } 3597 return disk; 3598 } 3599 EXPORT_SYMBOL(__blk_mq_alloc_disk); 3600 3601 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3602 struct blk_mq_tag_set *set, struct request_queue *q, 3603 int hctx_idx, int node) 3604 { 3605 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3606 3607 /* reuse dead hctx first */ 3608 spin_lock(&q->unused_hctx_lock); 3609 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3610 if (tmp->numa_node == node) { 3611 hctx = tmp; 3612 break; 3613 } 3614 } 3615 if (hctx) 3616 list_del_init(&hctx->hctx_list); 3617 spin_unlock(&q->unused_hctx_lock); 3618 3619 if (!hctx) 3620 hctx = blk_mq_alloc_hctx(q, set, node); 3621 if (!hctx) 3622 goto fail; 3623 3624 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 3625 goto free_hctx; 3626 3627 return hctx; 3628 3629 free_hctx: 3630 kobject_put(&hctx->kobj); 3631 fail: 3632 return NULL; 3633 } 3634 3635 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 3636 struct request_queue *q) 3637 { 3638 int i, j, end; 3639 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; 3640 3641 if (q->nr_hw_queues < set->nr_hw_queues) { 3642 struct blk_mq_hw_ctx **new_hctxs; 3643 3644 new_hctxs = kcalloc_node(set->nr_hw_queues, 3645 sizeof(*new_hctxs), GFP_KERNEL, 3646 set->numa_node); 3647 if (!new_hctxs) 3648 return; 3649 if (hctxs) 3650 memcpy(new_hctxs, hctxs, q->nr_hw_queues * 3651 sizeof(*hctxs)); 3652 q->queue_hw_ctx = new_hctxs; 3653 kfree(hctxs); 3654 hctxs = new_hctxs; 3655 } 3656 3657 /* protect against switching io scheduler */ 3658 mutex_lock(&q->sysfs_lock); 3659 for (i = 0; i < set->nr_hw_queues; i++) { 3660 int node; 3661 struct blk_mq_hw_ctx *hctx; 3662 3663 node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i); 3664 /* 3665 * If the hw queue has been mapped to another numa node, 3666 * we need to realloc the hctx. If allocation fails, fallback 3667 * to use the previous one. 3668 */ 3669 if (hctxs[i] && (hctxs[i]->numa_node == node)) 3670 continue; 3671 3672 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); 3673 if (hctx) { 3674 if (hctxs[i]) 3675 blk_mq_exit_hctx(q, set, hctxs[i], i); 3676 hctxs[i] = hctx; 3677 } else { 3678 if (hctxs[i]) 3679 pr_warn("Allocate new hctx on node %d fails,\ 3680 fallback to previous one on node %d\n", 3681 node, hctxs[i]->numa_node); 3682 else 3683 break; 3684 } 3685 } 3686 /* 3687 * Increasing nr_hw_queues fails. Free the newly allocated 3688 * hctxs and keep the previous q->nr_hw_queues. 
3689 */ 3690 if (i != set->nr_hw_queues) { 3691 j = q->nr_hw_queues; 3692 end = i; 3693 } else { 3694 j = i; 3695 end = q->nr_hw_queues; 3696 q->nr_hw_queues = set->nr_hw_queues; 3697 } 3698 3699 for (; j < end; j++) { 3700 struct blk_mq_hw_ctx *hctx = hctxs[j]; 3701 3702 if (hctx) { 3703 blk_mq_exit_hctx(q, set, hctx, j); 3704 hctxs[j] = NULL; 3705 } 3706 } 3707 mutex_unlock(&q->sysfs_lock); 3708 } 3709 3710 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 3711 struct request_queue *q) 3712 { 3713 /* mark the queue as mq asap */ 3714 q->mq_ops = set->ops; 3715 3716 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 3717 blk_mq_poll_stats_bkt, 3718 BLK_MQ_POLL_STATS_BKTS, q); 3719 if (!q->poll_cb) 3720 goto err_exit; 3721 3722 if (blk_mq_alloc_ctxs(q)) 3723 goto err_poll; 3724 3725 /* init q->mq_kobj and sw queues' kobjects */ 3726 blk_mq_sysfs_init(q); 3727 3728 INIT_LIST_HEAD(&q->unused_hctx_list); 3729 spin_lock_init(&q->unused_hctx_lock); 3730 3731 blk_mq_realloc_hw_ctxs(set, q); 3732 if (!q->nr_hw_queues) 3733 goto err_hctxs; 3734 3735 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 3736 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 3737 3738 q->tag_set = set; 3739 3740 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 3741 if (set->nr_maps > HCTX_TYPE_POLL && 3742 set->map[HCTX_TYPE_POLL].nr_queues) 3743 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 3744 3745 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 3746 INIT_LIST_HEAD(&q->requeue_list); 3747 spin_lock_init(&q->requeue_lock); 3748 3749 q->nr_requests = set->queue_depth; 3750 3751 /* 3752 * Default to classic polling 3753 */ 3754 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 3755 3756 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 3757 blk_mq_add_queue_tag_set(set, q); 3758 blk_mq_map_swqueue(q); 3759 return 0; 3760 3761 err_hctxs: 3762 kfree(q->queue_hw_ctx); 3763 q->nr_hw_queues = 0; 3764 blk_mq_sysfs_deinit(q); 3765 err_poll: 3766 blk_stat_free_callback(q->poll_cb); 3767 q->poll_cb = NULL; 3768 err_exit: 3769 q->mq_ops = NULL; 3770 return -ENOMEM; 3771 } 3772 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 3773 3774 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 3775 void blk_mq_exit_queue(struct request_queue *q) 3776 { 3777 struct blk_mq_tag_set *set = q->tag_set; 3778 3779 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ 3780 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 3781 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */ 3782 blk_mq_del_queue_tag_set(q); 3783 } 3784 3785 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 3786 { 3787 int i; 3788 3789 if (blk_mq_is_shared_tags(set->flags)) { 3790 set->shared_tags = blk_mq_alloc_map_and_rqs(set, 3791 BLK_MQ_NO_HCTX_IDX, 3792 set->queue_depth); 3793 if (!set->shared_tags) 3794 return -ENOMEM; 3795 } 3796 3797 for (i = 0; i < set->nr_hw_queues; i++) { 3798 if (!__blk_mq_alloc_map_and_rqs(set, i)) 3799 goto out_unwind; 3800 cond_resched(); 3801 } 3802 3803 return 0; 3804 3805 out_unwind: 3806 while (--i >= 0) 3807 __blk_mq_free_map_and_rqs(set, i); 3808 3809 if (blk_mq_is_shared_tags(set->flags)) { 3810 blk_mq_free_map_and_rqs(set, set->shared_tags, 3811 BLK_MQ_NO_HCTX_IDX); 3812 } 3813 3814 return -ENOMEM; 3815 } 3816 3817 /* 3818 * Allocate the request maps associated with this tag_set. Note that this 3819 * may reduce the depth asked for, if memory is tight. set->queue_depth 3820 * will be updated to reflect the allocated depth. 
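 *
 * The depth is halved on every failed attempt; we give up once it would
 * drop below set->reserved_tags + BLK_MQ_TAG_MIN.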
3821 */ 3822 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) 3823 { 3824 unsigned int depth; 3825 int err; 3826 3827 depth = set->queue_depth; 3828 do { 3829 err = __blk_mq_alloc_rq_maps(set); 3830 if (!err) 3831 break; 3832 3833 set->queue_depth >>= 1; 3834 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 3835 err = -ENOMEM; 3836 break; 3837 } 3838 } while (set->queue_depth); 3839 3840 if (!set->queue_depth || err) { 3841 pr_err("blk-mq: failed to allocate request map\n"); 3842 return -ENOMEM; 3843 } 3844 3845 if (depth != set->queue_depth) 3846 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 3847 depth, set->queue_depth); 3848 3849 return 0; 3850 } 3851 3852 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) 3853 { 3854 /* 3855 * blk_mq_map_queues() and multiple .map_queues() implementations 3856 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the 3857 * number of hardware queues. 3858 */ 3859 if (set->nr_maps == 1) 3860 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; 3861 3862 if (set->ops->map_queues && !is_kdump_kernel()) { 3863 int i; 3864 3865 /* 3866 * transport .map_queues is usually done in the following 3867 * way: 3868 * 3869 * for (queue = 0; queue < set->nr_hw_queues; queue++) { 3870 * mask = get_cpu_mask(queue) 3871 * for_each_cpu(cpu, mask) 3872 * set->map[x].mq_map[cpu] = queue; 3873 * } 3874 * 3875 * When we need to remap, the table has to be cleared for 3876 * killing stale mapping since one CPU may not be mapped 3877 * to any hw queue. 3878 */ 3879 for (i = 0; i < set->nr_maps; i++) 3880 blk_mq_clear_mq_map(&set->map[i]); 3881 3882 return set->ops->map_queues(set); 3883 } else { 3884 BUG_ON(set->nr_maps > 1); 3885 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 3886 } 3887 } 3888 3889 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 3890 int cur_nr_hw_queues, int new_nr_hw_queues) 3891 { 3892 struct blk_mq_tags **new_tags; 3893 3894 if (cur_nr_hw_queues >= new_nr_hw_queues) 3895 return 0; 3896 3897 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 3898 GFP_KERNEL, set->numa_node); 3899 if (!new_tags) 3900 return -ENOMEM; 3901 3902 if (set->tags) 3903 memcpy(new_tags, set->tags, cur_nr_hw_queues * 3904 sizeof(*set->tags)); 3905 kfree(set->tags); 3906 set->tags = new_tags; 3907 set->nr_hw_queues = new_nr_hw_queues; 3908 3909 return 0; 3910 } 3911 3912 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set, 3913 int new_nr_hw_queues) 3914 { 3915 return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues); 3916 } 3917 3918 /* 3919 * Alloc a tag set to be associated with one or more request queues. 3920 * May fail with EINVAL for various error conditions. May adjust the 3921 * requested depth down, if it's too large. In that case, the set 3922 * value will be stored in set->queue_depth. 
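 *
 * A minimal setup for a simple driver might look like this (sketch only,
 * the "mydrv" names are illustrative):
 *
 *	memset(set, 0, sizeof(*set));
 *	set->ops = &mydrv_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct mydrv_cmd);
 *	ret = blk_mq_alloc_tag_set(set);
 *
 * blk_mq_alloc_sq_tag_set() below wraps this common single-queue case.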
3923 */ 3924 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 3925 { 3926 int i, ret; 3927 3928 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 3929 3930 if (!set->nr_hw_queues) 3931 return -EINVAL; 3932 if (!set->queue_depth) 3933 return -EINVAL; 3934 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 3935 return -EINVAL; 3936 3937 if (!set->ops->queue_rq) 3938 return -EINVAL; 3939 3940 if (!set->ops->get_budget ^ !set->ops->put_budget) 3941 return -EINVAL; 3942 3943 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 3944 pr_info("blk-mq: reduced tag depth to %u\n", 3945 BLK_MQ_MAX_DEPTH); 3946 set->queue_depth = BLK_MQ_MAX_DEPTH; 3947 } 3948 3949 if (!set->nr_maps) 3950 set->nr_maps = 1; 3951 else if (set->nr_maps > HCTX_MAX_TYPES) 3952 return -EINVAL; 3953 3954 /* 3955 * If a crashdump is active, then we are potentially in a very 3956 * memory constrained environment. Limit us to 1 queue and 3957 * 64 tags to prevent using too much memory. 3958 */ 3959 if (is_kdump_kernel()) { 3960 set->nr_hw_queues = 1; 3961 set->nr_maps = 1; 3962 set->queue_depth = min(64U, set->queue_depth); 3963 } 3964 /* 3965 * There is no use for more h/w queues than cpus if we just have 3966 * a single map 3967 */ 3968 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 3969 set->nr_hw_queues = nr_cpu_ids; 3970 3971 if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0) 3972 return -ENOMEM; 3973 3974 ret = -ENOMEM; 3975 for (i = 0; i < set->nr_maps; i++) { 3976 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 3977 sizeof(set->map[i].mq_map[0]), 3978 GFP_KERNEL, set->numa_node); 3979 if (!set->map[i].mq_map) 3980 goto out_free_mq_map; 3981 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues; 3982 } 3983 3984 ret = blk_mq_update_queue_map(set); 3985 if (ret) 3986 goto out_free_mq_map; 3987 3988 ret = blk_mq_alloc_set_map_and_rqs(set); 3989 if (ret) 3990 goto out_free_mq_map; 3991 3992 mutex_init(&set->tag_list_lock); 3993 INIT_LIST_HEAD(&set->tag_list); 3994 3995 return 0; 3996 3997 out_free_mq_map: 3998 for (i = 0; i < set->nr_maps; i++) { 3999 kfree(set->map[i].mq_map); 4000 set->map[i].mq_map = NULL; 4001 } 4002 kfree(set->tags); 4003 set->tags = NULL; 4004 return ret; 4005 } 4006 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 4007 4008 /* allocate and initialize a tagset for a simple single-queue device */ 4009 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, 4010 const struct blk_mq_ops *ops, unsigned int queue_depth, 4011 unsigned int set_flags) 4012 { 4013 memset(set, 0, sizeof(*set)); 4014 set->ops = ops; 4015 set->nr_hw_queues = 1; 4016 set->nr_maps = 1; 4017 set->queue_depth = queue_depth; 4018 set->numa_node = NUMA_NO_NODE; 4019 set->flags = set_flags; 4020 return blk_mq_alloc_tag_set(set); 4021 } 4022 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); 4023 4024 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 4025 { 4026 int i, j; 4027 4028 for (i = 0; i < set->nr_hw_queues; i++) 4029 __blk_mq_free_map_and_rqs(set, i); 4030 4031 if (blk_mq_is_shared_tags(set->flags)) { 4032 blk_mq_free_map_and_rqs(set, set->shared_tags, 4033 BLK_MQ_NO_HCTX_IDX); 4034 } 4035 4036 for (j = 0; j < set->nr_maps; j++) { 4037 kfree(set->map[j].mq_map); 4038 set->map[j].mq_map = NULL; 4039 } 4040 4041 kfree(set->tags); 4042 set->tags = NULL; 4043 } 4044 EXPORT_SYMBOL(blk_mq_free_tag_set); 4045 4046 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 4047 { 4048 struct blk_mq_tag_set *set = q->tag_set; 4049 struct blk_mq_hw_ctx *hctx; 4050 int i, ret; 4051 4052 if (!set) 4053 
return -EINVAL; 4054 4055 if (q->nr_requests == nr) 4056 return 0; 4057 4058 blk_mq_freeze_queue(q); 4059 blk_mq_quiesce_queue(q); 4060 4061 ret = 0; 4062 queue_for_each_hw_ctx(q, hctx, i) { 4063 if (!hctx->tags) 4064 continue; 4065 /* 4066 * If we're using an MQ scheduler, just update the scheduler 4067 * queue depth. This is similar to what the old code would do. 4068 */ 4069 if (hctx->sched_tags) { 4070 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 4071 nr, true); 4072 } else { 4073 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 4074 false); 4075 } 4076 if (ret) 4077 break; 4078 if (q->elevator && q->elevator->type->ops.depth_updated) 4079 q->elevator->type->ops.depth_updated(hctx); 4080 } 4081 if (!ret) { 4082 q->nr_requests = nr; 4083 if (blk_mq_is_shared_tags(set->flags)) { 4084 if (q->elevator) 4085 blk_mq_tag_update_sched_shared_tags(q); 4086 else 4087 blk_mq_tag_resize_shared_tags(set, nr); 4088 } 4089 } 4090 4091 blk_mq_unquiesce_queue(q); 4092 blk_mq_unfreeze_queue(q); 4093 4094 return ret; 4095 } 4096 4097 /* 4098 * request_queue and elevator_type pair. 4099 * It is just used by __blk_mq_update_nr_hw_queues to cache 4100 * the elevator_type associated with a request_queue. 4101 */ 4102 struct blk_mq_qe_pair { 4103 struct list_head node; 4104 struct request_queue *q; 4105 struct elevator_type *type; 4106 }; 4107 4108 /* 4109 * Cache the elevator_type in qe pair list and switch the 4110 * io scheduler to 'none' 4111 */ 4112 static bool blk_mq_elv_switch_none(struct list_head *head, 4113 struct request_queue *q) 4114 { 4115 struct blk_mq_qe_pair *qe; 4116 4117 if (!q->elevator) 4118 return true; 4119 4120 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); 4121 if (!qe) 4122 return false; 4123 4124 INIT_LIST_HEAD(&qe->node); 4125 qe->q = q; 4126 qe->type = q->elevator->type; 4127 list_add(&qe->node, head); 4128 4129 mutex_lock(&q->sysfs_lock); 4130 /* 4131 * After elevator_switch_mq, the previous elevator_queue will be 4132 * released by elevator_release. The reference of the io scheduler 4133 * module get by elevator_get will also be put. So we need to get 4134 * a reference of the io scheduler module here to prevent it to be 4135 * removed. 4136 */ 4137 __module_get(qe->type->elevator_owner); 4138 elevator_switch_mq(q, NULL); 4139 mutex_unlock(&q->sysfs_lock); 4140 4141 return true; 4142 } 4143 4144 static void blk_mq_elv_switch_back(struct list_head *head, 4145 struct request_queue *q) 4146 { 4147 struct blk_mq_qe_pair *qe; 4148 struct elevator_type *t = NULL; 4149 4150 list_for_each_entry(qe, head, node) 4151 if (qe->q == q) { 4152 t = qe->type; 4153 break; 4154 } 4155 4156 if (!t) 4157 return; 4158 4159 list_del(&qe->node); 4160 kfree(qe); 4161 4162 mutex_lock(&q->sysfs_lock); 4163 elevator_switch_mq(q, t); 4164 mutex_unlock(&q->sysfs_lock); 4165 } 4166 4167 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 4168 int nr_hw_queues) 4169 { 4170 struct request_queue *q; 4171 LIST_HEAD(head); 4172 int prev_nr_hw_queues; 4173 4174 lockdep_assert_held(&set->tag_list_lock); 4175 4176 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) 4177 nr_hw_queues = nr_cpu_ids; 4178 if (nr_hw_queues < 1) 4179 return; 4180 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) 4181 return; 4182 4183 list_for_each_entry(q, &set->tag_list, tag_set_list) 4184 blk_mq_freeze_queue(q); 4185 /* 4186 * Switch IO scheduler to 'none', cleaning up the data associated 4187 * with the previous scheduler. 
/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in qe pair list and switch the
 * io scheduler to 'none'
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release. The reference to the io scheduler
	 * module obtained by elevator_get will also be put. So we need to
	 * take a reference on the io scheduler module here to prevent it
	 * from being removed.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static void blk_mq_elv_switch_back(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t = NULL;

	list_for_each_entry(qe, head, node)
		if (qe->q == q) {
			t = qe->type;
			break;
		}

	if (!t)
		return;

	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			int i = prev_nr_hw_queues;

			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				__blk_mq_free_map_and_rqs(set, i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
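/*
 * Illustrative sketch of a blk_mq_update_nr_hw_queues() caller, modelled on
 * what multiqueue drivers do after renegotiating their queue count (e.g. on
 * controller reset). 'struct example_ctrl' and example_hw_query_nr_queues()
 * are hypothetical; kept under "#if 0" so it is documentation only.
 */
#if 0
static void example_queue_count_changed(struct example_ctrl *ctrl)
{
	unsigned int nr_io_queues = example_hw_query_nr_queues(ctrl);

	/*
	 * This freezes every request_queue that shares the tag set, switches
	 * any I/O scheduler to 'none', redoes the sw-to-hw queue mapping and
	 * unfreezes again, so it may block while in-flight requests drain.
	 */
	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
}
#endif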
/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}

static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
	struct request *rq = blk_qc_to_rq(hctx, qc);
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	/*
	 * If a request has completed on a queue that uses an I/O scheduler,
	 * we won't get back a request from blk_qc_to_rq.
	 */
	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);

	/*
	 * If we sleep, have the caller restart the poll loop to reset the
	 * state. Like for the other success return cases, the caller is
	 * responsible for checking if the IO completed. If the IO isn't
	 * complete, we'll get called again and will go straight to the busy
	 * poll loop.
	 */
	return true;
}
static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (task_is_running(current))
			return 1;

		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}

static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);
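/*
 * Illustrative sketch of a polled-completion wait loop as seen from a
 * blk_mq_poll() consumer. Callers normally go through bio_poll(), which
 * forwards to blk_mq_poll() for mq devices; this mirrors the pattern used
 * by synchronous direct I/O paths. 'done' stands in for completion state
 * set by a hypothetical bi_end_io handler. Kept under "#if 0" so it is
 * documentation only.
 */
#if 0
static void example_poll_for_completion(struct kiocb *iocb, struct bio *bio,
					bool *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		/* Only HIPRI requests are pollable; otherwise just sleep. */
		if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}
#endif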