// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return xa_load(&q->hctx_table,
			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}
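
/*
 * Illustrative note (added for clarity, not part of the original source):
 * with BLK_QC_T_SHIFT == 16 and BLK_QC_T_INTERNAL == bit 31, a request on
 * hctx 2 holding driver tag 5 encodes to the poll cookie
 * (2 << 16) | 5 == 0x20005, while the same request holding only scheduler
 * tag 5 encodes to 0x80020005; blk_qc_to_rq() uses bit 31 to decide whether
 * to decode the tag against sched_tags or tags.
 */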

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part && blk_do_io_stat(rq) &&
	    (!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
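
/*
 * Illustrative usage note (added for clarity, not part of the original
 * source): a driver that needs dispatch stopped while it changes state
 * observed by ->queue_rq() would typically do
 *
 *	blk_mq_quiesce_queue(q);
 *	... update the driver state ...
 *	blk_mq_unquiesce_queue(q);
 *
 * Unlike freezing, quiescing only blocks dispatch; it does not wait for
 * already dispatched requests to complete.
 */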

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not, tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO " bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	req->bio = NULL;
	req->__data_len = 0;
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *     The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *     except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
		     !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
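
/*
 * Worked example (added for clarity, not part of the original source): for
 * a 12KiB request built from three 4KiB bios, blk_update_request(req,
 * BLK_STS_OK, 8192) ends the first two bios, advances req->__sector by 16
 * sectors, leaves 4096 bytes in __data_len and returns %true; a later call
 * covering the remaining 4KiB returns %false and the caller then finishes
 * the request (e.g. via __blk_mq_end_request()).
 */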

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (rq->bio)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete() operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);
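
/*
 * Summary note (added for clarity, not part of the original source): a
 * driver's completion path normally just calls blk_mq_complete_request().
 * The request is then completed in place, via an IPI to the submitting
 * CPU's blk_cpu_done llist, or via BLOCK_SOFTIRQ on the local CPU for
 * single-hw-queue devices, depending on QUEUE_FLAG_SAME_COMP/SAME_FORCE
 * and cache-domain sharing as decided by blk_mq_complete_need_ipi().
 */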

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke the request's end_io callback directly if
 *    the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	blk_account_io_start(rq);
	if (current->plug)
		blk_add_rq_to_plug(current->plug, rq);
	else
		blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
	struct blk_rq_wait *wait = rq->end_io_data;

	wait->ret = ret;
	complete(&wait->done);
}

static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	struct blk_rq_wait wait = {
		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
	};

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;

	blk_account_io_start(rq);
	blk_mq_sched_insert_request(rq, at_head, true, false);

	if (blk_rq_is_poll(rq)) {
		blk_rq_poll_completion(rq, &wait.done);
	} else {
		/*
		 * Prevent hang_check timer from firing at us during very long
		 * I/O
		 */
		unsigned long hang_check = sysctl_hung_task_timeout_secs;

		if (hang_check)
			while (!wait_for_completion_io_timeout(&wait.done,
					hang_check * (HZ/2)))
				;
		else
			wait_for_completion_io(&wait.done);
	}

	return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);
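
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * source): a driver issuing a synchronous passthrough command would
 * typically pair the helpers above roughly as follows:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in the driver-specific payload ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *
 * REQ_OP_DRV_IN is one of the passthrough operations accepted by the
 * blk_rq_is_passthrough() checks above.
 */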

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, the request may contain some
		 * driver-specific data, so insert it into the hctx dispatch
		 * list to avoid any merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv,
			       bool reserved)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, and the
	 * expire check is therefore reliable. If the request is not expired,
	 * then it was completed and reallocated as a new request after
	 * returning from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
	    !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - a weight of 7/8 and 1/8 is applied so that it can decrease exponentially
 * - 4 is taken as the scaling factor to avoid the result getting too
 *   small (0); the exact factor doesn't matter much because the EWMA
 *   decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
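
/*
 * Worked example (added for clarity, not part of the original source):
 * with WEIGHT == 8 and FACTOR == 4 the update is ewma = (ewma * 7 + 16) / 8
 * for a busy dispatch and ewma = ewma * 7 / 8 otherwise, so repeated busy
 * updates converge towards 15-16 while idle updates decay back towards 0.
 * dispatch_busy thus reflects how often recent dispatches ran out of tags,
 * budget or other resources.
 */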

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	int budget_token = -1;

	if (need_budget) {
		budget_token = blk_mq_get_dispatch_budget(rq->q);
		if (budget_token < 0) {
			blk_mq_put_driver_tag(rq);
			return PREP_DISPATCH_NO_BUDGET;
		}
		blk_mq_set_rq_budget_token(rq, budget_token);
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * All budgets not obtained from this function will be
			 * put back together when handling the partial dispatch.
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q, budget_token);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		struct list_head *list)
{
	struct request *rq;

	list_for_each_entry(rq, list, queuelist) {
		int budget_token = blk_mq_get_rq_budget_token(rq);

		if (budget_token >= 0)
			blk_mq_put_dispatch_budget(q, budget_token);
	}
}

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);
	bool needs_resource = false;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * once the request is queued to the LLD, no need to cover the
		 * budget any more
		 */
		if (nr_budgets)
			nr_budgets--;
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
			needs_resource = true;
			fallthrough;
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
			needs_resource = true;
			break;
		default:
			errors++;
			blk_mq_end_request(rq, ret);
		}
	} while (!list_empty(list));
out:
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);

		if (nr_budgets)
			blk_mq_release_budgets(q, list);

		spin_lock(&hctx->lock);
		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * Order adding requests to hctx->dispatch and checking
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(). This keeps the restart code path
		 * from missing the requests newly added to hctx->dispatch
		 * while SCHED_RESTART is observed here.
		 */
		smp_mb();

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART
		 * bit is set, run the queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.  We'll do
		 * the same if we couldn't get budget or couldn't lock a zone
		 * and SCHED_RESTART is set.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (prep == PREP_DISPATCH_NO_BUDGET)
			needs_resource = true;
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && needs_resource)
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	return (queued + errors) != 0;
}

/**
 * __blk_mq_run_hw_queue - Run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 *
 * Send pending requests to the hardware.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * We can't run the queue inline with interrupts disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	blk_mq_run_dispatch_ops(hctx->queue,
			blk_mq_sched_dispatch_requests(hctx));
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
2029 */ 2030 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 2031 { 2032 bool tried = false; 2033 int next_cpu = hctx->next_cpu; 2034 2035 if (hctx->queue->nr_hw_queues == 1) 2036 return WORK_CPU_UNBOUND; 2037 2038 if (--hctx->next_cpu_batch <= 0) { 2039 select_cpu: 2040 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, 2041 cpu_online_mask); 2042 if (next_cpu >= nr_cpu_ids) 2043 next_cpu = blk_mq_first_mapped_cpu(hctx); 2044 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2045 } 2046 2047 /* 2048 * Do unbound schedule if we can't find a online CPU for this hctx, 2049 * and it should only happen in the path of handling CPU DEAD. 2050 */ 2051 if (!cpu_online(next_cpu)) { 2052 if (!tried) { 2053 tried = true; 2054 goto select_cpu; 2055 } 2056 2057 /* 2058 * Make sure to re-select CPU next time once after CPUs 2059 * in hctx->cpumask become online again. 2060 */ 2061 hctx->next_cpu = next_cpu; 2062 hctx->next_cpu_batch = 1; 2063 return WORK_CPU_UNBOUND; 2064 } 2065 2066 hctx->next_cpu = next_cpu; 2067 return next_cpu; 2068 } 2069 2070 /** 2071 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue. 2072 * @hctx: Pointer to the hardware queue to run. 2073 * @async: If we want to run the queue asynchronously. 2074 * @msecs: Milliseconds of delay to wait before running the queue. 2075 * 2076 * If !@async, try to run the queue now. Else, run the queue asynchronously and 2077 * with a delay of @msecs. 2078 */ 2079 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, 2080 unsigned long msecs) 2081 { 2082 if (unlikely(blk_mq_hctx_stopped(hctx))) 2083 return; 2084 2085 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { 2086 int cpu = get_cpu(); 2087 if (cpumask_test_cpu(cpu, hctx->cpumask)) { 2088 __blk_mq_run_hw_queue(hctx); 2089 put_cpu(); 2090 return; 2091 } 2092 2093 put_cpu(); 2094 } 2095 2096 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 2097 msecs_to_jiffies(msecs)); 2098 } 2099 2100 /** 2101 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. 2102 * @hctx: Pointer to the hardware queue to run. 2103 * @msecs: Milliseconds of delay to wait before running the queue. 2104 * 2105 * Run a hardware queue asynchronously with a delay of @msecs. 2106 */ 2107 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 2108 { 2109 __blk_mq_delay_run_hw_queue(hctx, true, msecs); 2110 } 2111 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 2112 2113 /** 2114 * blk_mq_run_hw_queue - Start to run a hardware queue. 2115 * @hctx: Pointer to the hardware queue to run. 2116 * @async: If we want to run the queue asynchronously. 2117 * 2118 * Check if the request queue is not in a quiesced state and if there are 2119 * pending requests to be sent. If this is true, run the queue to send requests 2120 * to hardware. 2121 */ 2122 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2123 { 2124 bool need_run; 2125 2126 /* 2127 * When queue is quiesced, we may be switching io scheduler, or 2128 * updating nr_hw_queues, or other things, and we can't run queue 2129 * any more, even __blk_mq_hctx_has_pending() can't be called safely. 2130 * 2131 * And queue will be rerun in blk_mq_unquiesce_queue() if it is 2132 * quiesced. 
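*
* The usual driver-side pairing looks roughly like this (sketch only; the
* reconfiguration step is a made-up placeholder):
*
*	blk_mq_quiesce_queue(q);
*	foo_reconfigure_hardware(foo);
*	blk_mq_unquiesce_queue(q);
*
* blk_mq_unquiesce_queue() ends the quiesced period and runs the hardware
* queues again, which is why it is safe to simply skip the run here.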
2133 */ 2134 __blk_mq_run_dispatch_ops(hctx->queue, false, 2135 need_run = !blk_queue_quiesced(hctx->queue) && 2136 blk_mq_hctx_has_pending(hctx)); 2137 2138 if (need_run) 2139 __blk_mq_delay_run_hw_queue(hctx, async, 0); 2140 } 2141 EXPORT_SYMBOL(blk_mq_run_hw_queue); 2142 2143 /* 2144 * Is the request queue handled by an IO scheduler that does not respect 2145 * hardware queues when dispatching? 2146 */ 2147 static bool blk_mq_has_sqsched(struct request_queue *q) 2148 { 2149 struct elevator_queue *e = q->elevator; 2150 2151 if (e && e->type->ops.dispatch_request && 2152 !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE)) 2153 return true; 2154 return false; 2155 } 2156 2157 /* 2158 * Return the preferred queue to dispatch from (if any) for a non-mq aware IO 2159 * scheduler. 2160 */ 2161 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2162 { 2163 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 2164 /* 2165 * If the IO scheduler does not respect hardware queues when 2166 * dispatching, we just don't bother with multiple HW queues and 2167 * dispatch from hctx for the current CPU since running multiple queues 2168 * just causes lock contention inside the scheduler and pointless cache 2169 * bouncing. 2170 */ 2171 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx); 2172 2173 if (!blk_mq_hctx_stopped(hctx)) 2174 return hctx; 2175 return NULL; 2176 } 2177 2178 /** 2179 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2180 * @q: Pointer to the request queue to run. 2181 * @async: If we want to run the queue asynchronously. 2182 */ 2183 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2184 { 2185 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2186 unsigned long i; 2187 2188 sq_hctx = NULL; 2189 if (blk_mq_has_sqsched(q)) 2190 sq_hctx = blk_mq_get_sq_hctx(q); 2191 queue_for_each_hw_ctx(q, hctx, i) { 2192 if (blk_mq_hctx_stopped(hctx)) 2193 continue; 2194 /* 2195 * Dispatch from this hctx either if there's no hctx preferred 2196 * by IO scheduler or if it has requests that bypass the 2197 * scheduler. 2198 */ 2199 if (!sq_hctx || sq_hctx == hctx || 2200 !list_empty_careful(&hctx->dispatch)) 2201 blk_mq_run_hw_queue(hctx, async); 2202 } 2203 } 2204 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2205 2206 /** 2207 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2208 * @q: Pointer to the request queue to run. 2209 * @msecs: Milliseconds of delay to wait before running the queues. 2210 */ 2211 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2212 { 2213 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2214 unsigned long i; 2215 2216 sq_hctx = NULL; 2217 if (blk_mq_has_sqsched(q)) 2218 sq_hctx = blk_mq_get_sq_hctx(q); 2219 queue_for_each_hw_ctx(q, hctx, i) { 2220 if (blk_mq_hctx_stopped(hctx)) 2221 continue; 2222 /* 2223 * If there is already a run_work pending, leave the 2224 * pending delay untouched. Otherwise, a hctx can stall 2225 * if another hctx is re-delaying the other's work 2226 * before the work executes. 2227 */ 2228 if (delayed_work_pending(&hctx->run_work)) 2229 continue; 2230 /* 2231 * Dispatch from this hctx either if there's no hctx preferred 2232 * by IO scheduler or if it has requests that bypass the 2233 * scheduler.
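*
* For context, this helper itself is typically called from driver
* resource-recovery paths, e.g. once a previously exhausted resource is
* freed again (sketch; 'foo' and the 100ms value are assumptions):
*
*	blk_mq_delay_run_hw_queues(foo->queue, 100);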
2234 */ 2235 if (!sq_hctx || sq_hctx == hctx || 2236 !list_empty_careful(&hctx->dispatch)) 2237 blk_mq_delay_run_hw_queue(hctx, msecs); 2238 } 2239 } 2240 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2241 2242 /** 2243 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped 2244 * @q: request queue. 2245 * 2246 * The caller is responsible for serializing this function against 2247 * blk_mq_{start,stop}_hw_queue(). 2248 */ 2249 bool blk_mq_queue_stopped(struct request_queue *q) 2250 { 2251 struct blk_mq_hw_ctx *hctx; 2252 unsigned long i; 2253 2254 queue_for_each_hw_ctx(q, hctx, i) 2255 if (blk_mq_hctx_stopped(hctx)) 2256 return true; 2257 2258 return false; 2259 } 2260 EXPORT_SYMBOL(blk_mq_queue_stopped); 2261 2262 /* 2263 * This function is often used for pausing .queue_rq() by driver when 2264 * there isn't enough resource or some conditions aren't satisfied, and 2265 * BLK_STS_RESOURCE is usually returned. 2266 * 2267 * We do not guarantee that dispatch can be drained or blocked 2268 * after blk_mq_stop_hw_queue() returns. Please use 2269 * blk_mq_quiesce_queue() for that requirement. 2270 */ 2271 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2272 { 2273 cancel_delayed_work(&hctx->run_work); 2274 2275 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2276 } 2277 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2278 2279 /* 2280 * This function is often used for pausing .queue_rq() by driver when 2281 * there isn't enough resource or some conditions aren't satisfied, and 2282 * BLK_STS_RESOURCE is usually returned. 2283 * 2284 * We do not guarantee that dispatch can be drained or blocked 2285 * after blk_mq_stop_hw_queues() returns. Please use 2286 * blk_mq_quiesce_queue() for that requirement. 2287 */ 2288 void blk_mq_stop_hw_queues(struct request_queue *q) 2289 { 2290 struct blk_mq_hw_ctx *hctx; 2291 unsigned long i; 2292 2293 queue_for_each_hw_ctx(q, hctx, i) 2294 blk_mq_stop_hw_queue(hctx); 2295 } 2296 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2297 2298 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2299 { 2300 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2301 2302 blk_mq_run_hw_queue(hctx, false); 2303 } 2304 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2305 2306 void blk_mq_start_hw_queues(struct request_queue *q) 2307 { 2308 struct blk_mq_hw_ctx *hctx; 2309 unsigned long i; 2310 2311 queue_for_each_hw_ctx(q, hctx, i) 2312 blk_mq_start_hw_queue(hctx); 2313 } 2314 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2315 2316 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2317 { 2318 if (!blk_mq_hctx_stopped(hctx)) 2319 return; 2320 2321 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2322 blk_mq_run_hw_queue(hctx, async); 2323 } 2324 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2325 2326 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2327 { 2328 struct blk_mq_hw_ctx *hctx; 2329 unsigned long i; 2330 2331 queue_for_each_hw_ctx(q, hctx, i) 2332 blk_mq_start_stopped_hw_queue(hctx, async); 2333 } 2334 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2335 2336 static void blk_mq_run_work_fn(struct work_struct *work) 2337 { 2338 struct blk_mq_hw_ctx *hctx; 2339 2340 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); 2341 2342 /* 2343 * If we are stopped, don't run the queue. 
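*
* The stopped state checked here is normally toggled by drivers using the
* helpers above, following a pattern like (illustrative pairing only):
*
*	blk_mq_stop_hw_queue(hctx);
*	...
*	blk_mq_start_stopped_hw_queues(q, true);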
2344 */ 2345 if (blk_mq_hctx_stopped(hctx)) 2346 return; 2347 2348 __blk_mq_run_hw_queue(hctx); 2349 } 2350 2351 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, 2352 struct request *rq, 2353 bool at_head) 2354 { 2355 struct blk_mq_ctx *ctx = rq->mq_ctx; 2356 enum hctx_type type = hctx->type; 2357 2358 lockdep_assert_held(&ctx->lock); 2359 2360 trace_block_rq_insert(rq); 2361 2362 if (at_head) 2363 list_add(&rq->queuelist, &ctx->rq_lists[type]); 2364 else 2365 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); 2366 } 2367 2368 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, 2369 bool at_head) 2370 { 2371 struct blk_mq_ctx *ctx = rq->mq_ctx; 2372 2373 lockdep_assert_held(&ctx->lock); 2374 2375 __blk_mq_insert_req_list(hctx, rq, at_head); 2376 blk_mq_hctx_mark_pending(hctx, ctx); 2377 } 2378 2379 /** 2380 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2381 * @rq: Pointer to request to be inserted. 2382 * @at_head: true if the request should be inserted at the head of the list. 2383 * @run_queue: If we should run the hardware queue after inserting the request. 2384 * 2385 * Should only be used carefully, when the caller knows we want to 2386 * bypass a potential IO scheduler on the target device. 2387 */ 2388 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, 2389 bool run_queue) 2390 { 2391 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2392 2393 spin_lock(&hctx->lock); 2394 if (at_head) 2395 list_add(&rq->queuelist, &hctx->dispatch); 2396 else 2397 list_add_tail(&rq->queuelist, &hctx->dispatch); 2398 spin_unlock(&hctx->lock); 2399 2400 if (run_queue) 2401 blk_mq_run_hw_queue(hctx, false); 2402 } 2403 2404 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, 2405 struct list_head *list) 2406 2407 { 2408 struct request *rq; 2409 enum hctx_type type = hctx->type; 2410 2411 /* 2412 * preemption doesn't flush plug list, so it's possible ctx->cpu is 2413 * offline now 2414 */ 2415 list_for_each_entry(rq, list, queuelist) { 2416 BUG_ON(rq->mq_ctx != ctx); 2417 trace_block_rq_insert(rq); 2418 } 2419 2420 spin_lock(&ctx->lock); 2421 list_splice_tail_init(list, &ctx->rq_lists[type]); 2422 blk_mq_hctx_mark_pending(hctx, ctx); 2423 spin_unlock(&ctx->lock); 2424 } 2425 2426 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued, 2427 bool from_schedule) 2428 { 2429 if (hctx->queue->mq_ops->commit_rqs) { 2430 trace_block_unplug(hctx->queue, *queued, !from_schedule); 2431 hctx->queue->mq_ops->commit_rqs(hctx); 2432 } 2433 *queued = 0; 2434 } 2435 2436 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2437 unsigned int nr_segs) 2438 { 2439 int err; 2440 2441 if (bio->bi_opf & REQ_RAHEAD) 2442 rq->cmd_flags |= REQ_FAILFAST_MASK; 2443 2444 rq->__sector = bio->bi_iter.bi_sector; 2445 blk_rq_bio_prep(rq, bio, nr_segs); 2446 2447 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2448 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2449 WARN_ON_ONCE(err); 2450 2451 blk_account_io_start(rq); 2452 } 2453 2454 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2455 struct request *rq, bool last) 2456 { 2457 struct request_queue *q = rq->q; 2458 struct blk_mq_queue_data bd = { 2459 .rq = rq, 2460 .last = last, 2461 }; 2462 blk_status_t ret; 2463 2464 /* 2465 * For OK queue, we are done. For error, caller may kill it. 2466 * Any other error (busy), just add it to our list as we 2467 * previously would have done. 
2468 */ 2469 ret = q->mq_ops->queue_rq(hctx, &bd); 2470 switch (ret) { 2471 case BLK_STS_OK: 2472 blk_mq_update_dispatch_busy(hctx, false); 2473 break; 2474 case BLK_STS_RESOURCE: 2475 case BLK_STS_DEV_RESOURCE: 2476 blk_mq_update_dispatch_busy(hctx, true); 2477 __blk_mq_requeue_request(rq); 2478 break; 2479 default: 2480 blk_mq_update_dispatch_busy(hctx, false); 2481 break; 2482 } 2483 2484 return ret; 2485 } 2486 2487 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2488 struct request *rq, 2489 bool bypass_insert, bool last) 2490 { 2491 struct request_queue *q = rq->q; 2492 bool run_queue = true; 2493 int budget_token; 2494 2495 /* 2496 * An RCU or SRCU read lock is needed before checking the quiesced flag. 2497 * 2498 * When the queue is stopped or quiesced, ignore 'bypass_insert' from 2499 * blk_mq_request_issue_directly(), and return BLK_STS_OK to the caller 2500 * so that the driver does not try to dispatch again. 2501 */ 2502 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { 2503 run_queue = false; 2504 bypass_insert = false; 2505 goto insert; 2506 } 2507 2508 if ((rq->rq_flags & RQF_ELV) && !bypass_insert) 2509 goto insert; 2510 2511 budget_token = blk_mq_get_dispatch_budget(q); 2512 if (budget_token < 0) 2513 goto insert; 2514 2515 blk_mq_set_rq_budget_token(rq, budget_token); 2516 2517 if (!blk_mq_get_driver_tag(rq)) { 2518 blk_mq_put_dispatch_budget(q, budget_token); 2519 goto insert; 2520 } 2521 2522 return __blk_mq_issue_directly(hctx, rq, last); 2523 insert: 2524 if (bypass_insert) 2525 return BLK_STS_RESOURCE; 2526 2527 blk_mq_sched_insert_request(rq, false, run_queue, false); 2528 2529 return BLK_STS_OK; 2530 } 2531 2532 /** 2533 * blk_mq_try_issue_directly - Try to send a request directly to the device driver. 2534 * @hctx: Pointer to the associated hardware queue. 2535 * @rq: Pointer to request to be sent. 2536 * 2537 * If the device has enough resources to accept a new request now, send the 2538 * request directly to the device driver. Else, insert it into the hctx->dispatch 2539 * queue, so we can try to send it again in the future. Requests inserted into this 2540 * queue have higher priority.
2541 */ 2542 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2543 struct request *rq) 2544 { 2545 blk_status_t ret = 2546 __blk_mq_try_issue_directly(hctx, rq, false, true); 2547 2548 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) 2549 blk_mq_request_bypass_insert(rq, false, true); 2550 else if (ret != BLK_STS_OK) 2551 blk_mq_end_request(rq, ret); 2552 } 2553 2554 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2555 { 2556 return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last); 2557 } 2558 2559 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule) 2560 { 2561 struct blk_mq_hw_ctx *hctx = NULL; 2562 struct request *rq; 2563 int queued = 0; 2564 int errors = 0; 2565 2566 while ((rq = rq_list_pop(&plug->mq_list))) { 2567 bool last = rq_list_empty(plug->mq_list); 2568 blk_status_t ret; 2569 2570 if (hctx != rq->mq_hctx) { 2571 if (hctx) 2572 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2573 hctx = rq->mq_hctx; 2574 } 2575 2576 ret = blk_mq_request_issue_directly(rq, last); 2577 switch (ret) { 2578 case BLK_STS_OK: 2579 queued++; 2580 break; 2581 case BLK_STS_RESOURCE: 2582 case BLK_STS_DEV_RESOURCE: 2583 blk_mq_request_bypass_insert(rq, false, last); 2584 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2585 return; 2586 default: 2587 blk_mq_end_request(rq, ret); 2588 errors++; 2589 break; 2590 } 2591 } 2592 2593 /* 2594 * If we didn't flush the entire list, we could have told the driver 2595 * there was more coming, but that turned out to be a lie. 2596 */ 2597 if (errors) 2598 blk_mq_commit_rqs(hctx, &queued, from_schedule); 2599 } 2600 2601 static void __blk_mq_flush_plug_list(struct request_queue *q, 2602 struct blk_plug *plug) 2603 { 2604 if (blk_queue_quiesced(q)) 2605 return; 2606 q->mq_ops->queue_rqs(&plug->mq_list); 2607 } 2608 2609 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2610 { 2611 struct blk_mq_hw_ctx *this_hctx = NULL; 2612 struct blk_mq_ctx *this_ctx = NULL; 2613 struct request *requeue_list = NULL; 2614 unsigned int depth = 0; 2615 LIST_HEAD(list); 2616 2617 do { 2618 struct request *rq = rq_list_pop(&plug->mq_list); 2619 2620 if (!this_hctx) { 2621 this_hctx = rq->mq_hctx; 2622 this_ctx = rq->mq_ctx; 2623 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { 2624 rq_list_add(&requeue_list, rq); 2625 continue; 2626 } 2627 list_add_tail(&rq->queuelist, &list); 2628 depth++; 2629 } while (!rq_list_empty(plug->mq_list)); 2630 2631 plug->mq_list = requeue_list; 2632 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2633 blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched); 2634 } 2635 2636 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2637 { 2638 struct request *rq; 2639 2640 if (rq_list_empty(plug->mq_list)) 2641 return; 2642 plug->rq_count = 0; 2643 2644 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2645 struct request_queue *q; 2646 2647 rq = rq_list_peek(&plug->mq_list); 2648 q = rq->q; 2649 2650 /* 2651 * Peek first request and see if we have a ->queue_rqs() hook. 2652 * If we do, we can dispatch the whole plug list in one go. We 2653 * already know at this point that all requests belong to the 2654 * same queue, caller must ensure that's the case. 2655 * 2656 * Since we pass off the full list to the driver at this point, 2657 * we do not increment the active request count for the queue. 2658 * Bypass shared tags for now because of that. 
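*
* A ->queue_rqs() implementation consumes the whole list itself; its
* general shape is roughly the following (sketch, foo_issue() is a
* made-up helper):
*
*	static void foo_queue_rqs(struct request **rqlist)
*	{
*		struct request *rq;
*
*		while ((rq = rq_list_pop(rqlist)))
*			foo_issue(rq);
*	}
*
* Real implementations additionally batch doorbell/notification writes,
* which is the main point of the interface.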
2659 */ 2660 if (q->mq_ops->queue_rqs && 2661 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 2662 blk_mq_run_dispatch_ops(q, 2663 __blk_mq_flush_plug_list(q, plug)); 2664 if (rq_list_empty(plug->mq_list)) 2665 return; 2666 } 2667 2668 blk_mq_run_dispatch_ops(q, 2669 blk_mq_plug_issue_direct(plug, false)); 2670 if (rq_list_empty(plug->mq_list)) 2671 return; 2672 } 2673 2674 do { 2675 blk_mq_dispatch_plug_list(plug, from_schedule); 2676 } while (!rq_list_empty(plug->mq_list)); 2677 } 2678 2679 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2680 struct list_head *list) 2681 { 2682 int queued = 0; 2683 int errors = 0; 2684 2685 while (!list_empty(list)) { 2686 blk_status_t ret; 2687 struct request *rq = list_first_entry(list, struct request, 2688 queuelist); 2689 2690 list_del_init(&rq->queuelist); 2691 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2692 if (ret != BLK_STS_OK) { 2693 if (ret == BLK_STS_RESOURCE || 2694 ret == BLK_STS_DEV_RESOURCE) { 2695 blk_mq_request_bypass_insert(rq, false, 2696 list_empty(list)); 2697 break; 2698 } 2699 blk_mq_end_request(rq, ret); 2700 errors++; 2701 } else 2702 queued++; 2703 } 2704 2705 /* 2706 * If we didn't flush the entire list, we could have told 2707 * the driver there was more coming, but that turned out to 2708 * be a lie. 2709 */ 2710 if ((!list_empty(list) || errors) && 2711 hctx->queue->mq_ops->commit_rqs && queued) 2712 hctx->queue->mq_ops->commit_rqs(hctx); 2713 } 2714 2715 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2716 struct bio *bio, unsigned int nr_segs) 2717 { 2718 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2719 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2720 return true; 2721 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2722 return true; 2723 } 2724 return false; 2725 } 2726 2727 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2728 struct blk_plug *plug, 2729 struct bio *bio, 2730 unsigned int nsegs) 2731 { 2732 struct blk_mq_alloc_data data = { 2733 .q = q, 2734 .nr_tags = 1, 2735 .cmd_flags = bio->bi_opf, 2736 }; 2737 struct request *rq; 2738 2739 if (unlikely(bio_queue_enter(bio))) 2740 return NULL; 2741 2742 if (blk_mq_attempt_bio_merge(q, bio, nsegs)) 2743 goto queue_exit; 2744 2745 rq_qos_throttle(q, bio); 2746 2747 if (plug) { 2748 data.nr_tags = plug->nr_ios; 2749 plug->nr_ios = 1; 2750 data.cached_rq = &plug->cached_rq; 2751 } 2752 2753 rq = __blk_mq_alloc_requests(&data); 2754 if (rq) 2755 return rq; 2756 rq_qos_cleanup(q, bio); 2757 if (bio->bi_opf & REQ_NOWAIT) 2758 bio_wouldblock_error(bio); 2759 queue_exit: 2760 blk_queue_exit(q); 2761 return NULL; 2762 } 2763 2764 static inline struct request *blk_mq_get_cached_request(struct request_queue *q, 2765 struct blk_plug *plug, struct bio **bio, unsigned int nsegs) 2766 { 2767 struct request *rq; 2768 2769 if (!plug) 2770 return NULL; 2771 rq = rq_list_peek(&plug->cached_rq); 2772 if (!rq || rq->q != q) 2773 return NULL; 2774 2775 if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { 2776 *bio = NULL; 2777 return NULL; 2778 } 2779 2780 rq_qos_throttle(q, *bio); 2781 2782 if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) 2783 return NULL; 2784 if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) 2785 return NULL; 2786 2787 rq->cmd_flags = (*bio)->bi_opf; 2788 plug->cached_rq = rq_list_next(rq); 2789 INIT_LIST_HEAD(&rq->queuelist); 2790 return rq; 2791 } 2792 2793 /** 2794 * blk_mq_submit_bio - Create and send a request to block device. 2795 * @bio: Bio pointer. 
2796 * 2797 * Builds up a request structure from @q and @bio and send to the device. The 2798 * request may not be queued directly to hardware if: 2799 * * This request can be merged with another one 2800 * * We want to place request at plug queue for possible future merging 2801 * * There is an IO scheduler active at this queue 2802 * 2803 * It will not queue the request if there is an error with the bio, or at the 2804 * request creation. 2805 */ 2806 void blk_mq_submit_bio(struct bio *bio) 2807 { 2808 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2809 struct blk_plug *plug = blk_mq_plug(q, bio); 2810 const int is_sync = op_is_sync(bio->bi_opf); 2811 struct request *rq; 2812 unsigned int nr_segs = 1; 2813 blk_status_t ret; 2814 2815 blk_queue_bounce(q, &bio); 2816 if (blk_may_split(q, bio)) 2817 __blk_queue_split(q, &bio, &nr_segs); 2818 2819 if (!bio_integrity_prep(bio)) 2820 return; 2821 2822 rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); 2823 if (!rq) { 2824 if (!bio) 2825 return; 2826 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 2827 if (unlikely(!rq)) 2828 return; 2829 } 2830 2831 trace_block_getrq(bio); 2832 2833 rq_qos_track(q, rq, bio); 2834 2835 blk_mq_bio_to_request(rq, bio, nr_segs); 2836 2837 ret = blk_crypto_init_request(rq); 2838 if (ret != BLK_STS_OK) { 2839 bio->bi_status = ret; 2840 bio_endio(bio); 2841 blk_mq_free_request(rq); 2842 return; 2843 } 2844 2845 if (op_is_flush(bio->bi_opf)) { 2846 blk_insert_flush(rq); 2847 return; 2848 } 2849 2850 if (plug) 2851 blk_add_rq_to_plug(plug, rq); 2852 else if ((rq->rq_flags & RQF_ELV) || 2853 (rq->mq_hctx->dispatch_busy && 2854 (q->nr_hw_queues == 1 || !is_sync))) 2855 blk_mq_sched_insert_request(rq, false, true, true); 2856 else 2857 blk_mq_run_dispatch_ops(rq->q, 2858 blk_mq_try_issue_directly(rq->mq_hctx, rq)); 2859 } 2860 2861 #ifdef CONFIG_BLK_MQ_STACKING 2862 /** 2863 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2864 * @rq: the request being queued 2865 */ 2866 blk_status_t blk_insert_cloned_request(struct request *rq) 2867 { 2868 struct request_queue *q = rq->q; 2869 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); 2870 blk_status_t ret; 2871 2872 if (blk_rq_sectors(rq) > max_sectors) { 2873 /* 2874 * SCSI device does not have a good way to return if 2875 * Write Same/Zero is actually supported. If a device rejects 2876 * a non-read/write command (discard, write same,etc.) the 2877 * low-level device driver will set the relevant queue limit to 2878 * 0 to prevent blk-lib from issuing more of the offending 2879 * operations. Commands queued prior to the queue limit being 2880 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 2881 * errors being propagated to upper layers. 2882 */ 2883 if (max_sectors == 0) 2884 return BLK_STS_NOTSUPP; 2885 2886 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", 2887 __func__, blk_rq_sectors(rq), max_sectors); 2888 return BLK_STS_IOERR; 2889 } 2890 2891 /* 2892 * The queue settings related to segment counting may differ from the 2893 * original queue. 2894 */ 2895 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 2896 if (rq->nr_phys_segments > queue_max_segments(q)) { 2897 printk(KERN_ERR "%s: over max segments limit. 
(%hu > %hu)\n", 2898 __func__, rq->nr_phys_segments, queue_max_segments(q)); 2899 return BLK_STS_IOERR; 2900 } 2901 2902 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 2903 return BLK_STS_IOERR; 2904 2905 if (blk_crypto_insert_cloned_request(rq)) 2906 return BLK_STS_IOERR; 2907 2908 blk_account_io_start(rq); 2909 2910 /* 2911 * Since we have a scheduler attached on the top device, 2912 * bypass a potential scheduler on the bottom device for 2913 * insert. 2914 */ 2915 blk_mq_run_dispatch_ops(q, 2916 ret = blk_mq_request_issue_directly(rq, true)); 2917 if (ret) 2918 blk_account_io_done(rq, ktime_get_ns()); 2919 return ret; 2920 } 2921 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 2922 2923 /** 2924 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2925 * @rq: the clone request to be cleaned up 2926 * 2927 * Description: 2928 * Free all bios in @rq for a cloned request. 2929 */ 2930 void blk_rq_unprep_clone(struct request *rq) 2931 { 2932 struct bio *bio; 2933 2934 while ((bio = rq->bio) != NULL) { 2935 rq->bio = bio->bi_next; 2936 2937 bio_put(bio); 2938 } 2939 } 2940 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2941 2942 /** 2943 * blk_rq_prep_clone - Helper function to setup clone request 2944 * @rq: the request to be setup 2945 * @rq_src: original request to be cloned 2946 * @bs: bio_set that bios for clone are allocated from 2947 * @gfp_mask: memory allocation mask for bio 2948 * @bio_ctr: setup function to be called for each clone bio. 2949 * Returns %0 for success, non %0 for failure. 2950 * @data: private data to be passed to @bio_ctr 2951 * 2952 * Description: 2953 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2954 * Also, pages which the original bios are pointing to are not copied 2955 * and the cloned bios just point same pages. 2956 * So cloned bios must be completed before original bios, which means 2957 * the caller must complete @rq before @rq_src. 2958 */ 2959 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2960 struct bio_set *bs, gfp_t gfp_mask, 2961 int (*bio_ctr)(struct bio *, struct bio *, void *), 2962 void *data) 2963 { 2964 struct bio *bio, *bio_src; 2965 2966 if (!bs) 2967 bs = &fs_bio_set; 2968 2969 __rq_for_each_bio(bio_src, rq_src) { 2970 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, 2971 bs); 2972 if (!bio) 2973 goto free_and_out; 2974 2975 if (bio_ctr && bio_ctr(bio, bio_src, data)) 2976 goto free_and_out; 2977 2978 if (rq->bio) { 2979 rq->biotail->bi_next = bio; 2980 rq->biotail = bio; 2981 } else { 2982 rq->bio = rq->biotail = bio; 2983 } 2984 bio = NULL; 2985 } 2986 2987 /* Copy attributes of the original request to the clone request. */ 2988 rq->__sector = blk_rq_pos(rq_src); 2989 rq->__data_len = blk_rq_bytes(rq_src); 2990 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 2991 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 2992 rq->special_vec = rq_src->special_vec; 2993 } 2994 rq->nr_phys_segments = rq_src->nr_phys_segments; 2995 rq->ioprio = rq_src->ioprio; 2996 2997 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 2998 goto free_and_out; 2999 3000 return 0; 3001 3002 free_and_out: 3003 if (bio) 3004 bio_put(bio); 3005 blk_rq_unprep_clone(rq); 3006 3007 return -ENOMEM; 3008 } 3009 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3010 #endif /* CONFIG_BLK_MQ_STACKING */ 3011 3012 /* 3013 * Steal bios from a request and add them to a bio list. 3014 * The request must not have been partially completed before. 
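*
* A typical caller looks roughly like this (hedged sketch, modelled on a
* multipath-style failover; not copied from a real driver):
*
*	struct bio_list bios = BIO_EMPTY_LIST;
*
*	blk_steal_bios(&bios, rq);
*	blk_mq_end_request(rq, BLK_STS_OK);
*	... later resubmit the stolen bios on another path ...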
3015 */ 3016 void blk_steal_bios(struct bio_list *list, struct request *rq) 3017 { 3018 if (rq->bio) { 3019 if (list->tail) 3020 list->tail->bi_next = rq->bio; 3021 else 3022 list->head = rq->bio; 3023 list->tail = rq->biotail; 3024 3025 rq->bio = NULL; 3026 rq->biotail = NULL; 3027 } 3028 3029 rq->__data_len = 0; 3030 } 3031 EXPORT_SYMBOL_GPL(blk_steal_bios); 3032 3033 static size_t order_to_size(unsigned int order) 3034 { 3035 return (size_t)PAGE_SIZE << order; 3036 } 3037 3038 /* called before freeing request pool in @tags */ 3039 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3040 struct blk_mq_tags *tags) 3041 { 3042 struct page *page; 3043 unsigned long flags; 3044 3045 /* There is no need to clear a driver tags own mapping */ 3046 if (drv_tags == tags) 3047 return; 3048 3049 list_for_each_entry(page, &tags->page_list, lru) { 3050 unsigned long start = (unsigned long)page_address(page); 3051 unsigned long end = start + order_to_size(page->private); 3052 int i; 3053 3054 for (i = 0; i < drv_tags->nr_tags; i++) { 3055 struct request *rq = drv_tags->rqs[i]; 3056 unsigned long rq_addr = (unsigned long)rq; 3057 3058 if (rq_addr >= start && rq_addr < end) { 3059 WARN_ON_ONCE(req_ref_read(rq) != 0); 3060 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3061 } 3062 } 3063 } 3064 3065 /* 3066 * Wait until all pending iteration is done. 3067 * 3068 * Request reference is cleared and it is guaranteed to be observed 3069 * after the ->lock is released. 3070 */ 3071 spin_lock_irqsave(&drv_tags->lock, flags); 3072 spin_unlock_irqrestore(&drv_tags->lock, flags); 3073 } 3074 3075 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3076 unsigned int hctx_idx) 3077 { 3078 struct blk_mq_tags *drv_tags; 3079 struct page *page; 3080 3081 if (list_empty(&tags->page_list)) 3082 return; 3083 3084 if (blk_mq_is_shared_tags(set->flags)) 3085 drv_tags = set->shared_tags; 3086 else 3087 drv_tags = set->tags[hctx_idx]; 3088 3089 if (tags->static_rqs && set->ops->exit_request) { 3090 int i; 3091 3092 for (i = 0; i < tags->nr_tags; i++) { 3093 struct request *rq = tags->static_rqs[i]; 3094 3095 if (!rq) 3096 continue; 3097 set->ops->exit_request(set, rq, hctx_idx); 3098 tags->static_rqs[i] = NULL; 3099 } 3100 } 3101 3102 blk_mq_clear_rq_mapping(drv_tags, tags); 3103 3104 while (!list_empty(&tags->page_list)) { 3105 page = list_first_entry(&tags->page_list, struct page, lru); 3106 list_del_init(&page->lru); 3107 /* 3108 * Remove kmemleak object previously allocated in 3109 * blk_mq_alloc_rqs(). 
3110 */ 3111 kmemleak_free(page_address(page)); 3112 __free_pages(page, page->private); 3113 } 3114 } 3115 3116 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3117 { 3118 kfree(tags->rqs); 3119 tags->rqs = NULL; 3120 kfree(tags->static_rqs); 3121 tags->static_rqs = NULL; 3122 3123 blk_mq_free_tags(tags); 3124 } 3125 3126 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3127 unsigned int hctx_idx) 3128 { 3129 int i; 3130 3131 for (i = 0; i < set->nr_maps; i++) { 3132 unsigned int start = set->map[i].queue_offset; 3133 unsigned int end = start + set->map[i].nr_queues; 3134 3135 if (hctx_idx >= start && hctx_idx < end) 3136 break; 3137 } 3138 3139 if (i >= set->nr_maps) 3140 i = HCTX_TYPE_DEFAULT; 3141 3142 return i; 3143 } 3144 3145 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3146 unsigned int hctx_idx) 3147 { 3148 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3149 3150 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3151 } 3152 3153 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3154 unsigned int hctx_idx, 3155 unsigned int nr_tags, 3156 unsigned int reserved_tags) 3157 { 3158 int node = blk_mq_get_hctx_node(set, hctx_idx); 3159 struct blk_mq_tags *tags; 3160 3161 if (node == NUMA_NO_NODE) 3162 node = set->numa_node; 3163 3164 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 3165 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 3166 if (!tags) 3167 return NULL; 3168 3169 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3170 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3171 node); 3172 if (!tags->rqs) { 3173 blk_mq_free_tags(tags); 3174 return NULL; 3175 } 3176 3177 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3178 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3179 node); 3180 if (!tags->static_rqs) { 3181 kfree(tags->rqs); 3182 blk_mq_free_tags(tags); 3183 return NULL; 3184 } 3185 3186 return tags; 3187 } 3188 3189 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3190 unsigned int hctx_idx, int node) 3191 { 3192 int ret; 3193 3194 if (set->ops->init_request) { 3195 ret = set->ops->init_request(set, rq, hctx_idx, node); 3196 if (ret) 3197 return ret; 3198 } 3199 3200 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3201 return 0; 3202 } 3203 3204 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3205 struct blk_mq_tags *tags, 3206 unsigned int hctx_idx, unsigned int depth) 3207 { 3208 unsigned int i, j, entries_per_page, max_order = 4; 3209 int node = blk_mq_get_hctx_node(set, hctx_idx); 3210 size_t rq_size, left; 3211 3212 if (node == NUMA_NO_NODE) 3213 node = set->numa_node; 3214 3215 INIT_LIST_HEAD(&tags->page_list); 3216 3217 /* 3218 * rq_size is the size of the request plus driver payload, rounded 3219 * to the cacheline size 3220 */ 3221 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3222 cache_line_size()); 3223 left = rq_size * depth; 3224 3225 for (i = 0; i < depth; ) { 3226 int this_order = max_order; 3227 struct page *page; 3228 int to_do; 3229 void *p; 3230 3231 while (this_order && left < order_to_size(this_order - 1)) 3232 this_order--; 3233 3234 do { 3235 page = alloc_pages_node(node, 3236 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3237 this_order); 3238 if (page) 3239 break; 3240 if (!this_order--) 3241 break; 3242 if (order_to_size(this_order) < rq_size) 3243 break; 3244 } while (1); 3245 3246 if (!page) 3247 goto fail; 3248 3249 page->private = this_order; 3250 list_add_tail(&page->lru, &tags->page_list); 3251 3252 p = 
page_address(page); 3253 /* 3254 * Allow kmemleak to scan these pages as they contain pointers 3255 * to additional allocations like via ops->init_request(). 3256 */ 3257 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3258 entries_per_page = order_to_size(this_order) / rq_size; 3259 to_do = min(entries_per_page, depth - i); 3260 left -= to_do * rq_size; 3261 for (j = 0; j < to_do; j++) { 3262 struct request *rq = p; 3263 3264 tags->static_rqs[i] = rq; 3265 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3266 tags->static_rqs[i] = NULL; 3267 goto fail; 3268 } 3269 3270 p += rq_size; 3271 i++; 3272 } 3273 } 3274 return 0; 3275 3276 fail: 3277 blk_mq_free_rqs(set, tags, hctx_idx); 3278 return -ENOMEM; 3279 } 3280 3281 struct rq_iter_data { 3282 struct blk_mq_hw_ctx *hctx; 3283 bool has_rq; 3284 }; 3285 3286 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) 3287 { 3288 struct rq_iter_data *iter_data = data; 3289 3290 if (rq->mq_hctx != iter_data->hctx) 3291 return true; 3292 iter_data->has_rq = true; 3293 return false; 3294 } 3295 3296 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3297 { 3298 struct blk_mq_tags *tags = hctx->sched_tags ? 3299 hctx->sched_tags : hctx->tags; 3300 struct rq_iter_data data = { 3301 .hctx = hctx, 3302 }; 3303 3304 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3305 return data.has_rq; 3306 } 3307 3308 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3309 struct blk_mq_hw_ctx *hctx) 3310 { 3311 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3312 return false; 3313 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3314 return false; 3315 return true; 3316 } 3317 3318 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3319 { 3320 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3321 struct blk_mq_hw_ctx, cpuhp_online); 3322 3323 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 3324 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 3325 return 0; 3326 3327 /* 3328 * Prevent new request from being allocated on the current hctx. 3329 * 3330 * The smp_mb__after_atomic() Pairs with the implied barrier in 3331 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 3332 * seen once we return from the tag allocator. 3333 */ 3334 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3335 smp_mb__after_atomic(); 3336 3337 /* 3338 * Try to grab a reference to the queue and wait for any outstanding 3339 * requests. If we could not grab a reference the queue has been 3340 * frozen and there are no requests. 3341 */ 3342 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3343 while (blk_mq_hctx_has_requests(hctx)) 3344 msleep(5); 3345 percpu_ref_put(&hctx->queue->q_usage_counter); 3346 } 3347 3348 return 0; 3349 } 3350 3351 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3352 { 3353 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3354 struct blk_mq_hw_ctx, cpuhp_online); 3355 3356 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3357 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3358 return 0; 3359 } 3360 3361 /* 3362 * 'cpu' is going away. splice any existing rq_list entries from this 3363 * software queue to the hw queue dispatch list, and ensure that it 3364 * gets run. 
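*
* This callback (and the online/offline pair above) is registered once at
* init time through the CPU hotplug state machine, roughly:
*
*	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead",
*				NULL, blk_mq_hctx_notify_dead);
*
* with each hctx then added as an instance via
* cpuhp_state_add_instance_nocalls() in blk_mq_init_hctx().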
3365 */ 3366 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3367 { 3368 struct blk_mq_hw_ctx *hctx; 3369 struct blk_mq_ctx *ctx; 3370 LIST_HEAD(tmp); 3371 enum hctx_type type; 3372 3373 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3374 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 3375 return 0; 3376 3377 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3378 type = hctx->type; 3379 3380 spin_lock(&ctx->lock); 3381 if (!list_empty(&ctx->rq_lists[type])) { 3382 list_splice_init(&ctx->rq_lists[type], &tmp); 3383 blk_mq_hctx_clear_pending(hctx, ctx); 3384 } 3385 spin_unlock(&ctx->lock); 3386 3387 if (list_empty(&tmp)) 3388 return 0; 3389 3390 spin_lock(&hctx->lock); 3391 list_splice_tail_init(&tmp, &hctx->dispatch); 3392 spin_unlock(&hctx->lock); 3393 3394 blk_mq_run_hw_queue(hctx, true); 3395 return 0; 3396 } 3397 3398 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3399 { 3400 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3401 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3402 &hctx->cpuhp_online); 3403 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3404 &hctx->cpuhp_dead); 3405 } 3406 3407 /* 3408 * Before freeing the hw queue, clear the flush request reference in 3409 * tags->rqs[] to avoid a potential use-after-free. 3410 */ 3411 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3412 unsigned int queue_depth, struct request *flush_rq) 3413 { 3414 int i; 3415 unsigned long flags; 3416 3417 /* The hw queue may not be mapped yet */ 3418 if (!tags) 3419 return; 3420 3421 WARN_ON_ONCE(req_ref_read(flush_rq) != 0); 3422 3423 for (i = 0; i < queue_depth; i++) 3424 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3425 3426 /* 3427 * Wait until all pending iterations are done. 3428 * 3429 * The request reference has been cleared and is guaranteed to be observed 3430 * after the ->lock is released.
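*
* Concretely, concurrent tag iteration reads tags->rqs[] while holding the
* same lock (see blk_mq_find_and_get_req() in blk-mq-tag.c), so the empty
* lock/unlock pair below waits out any iterator that raced with the
* clearing above. The reader side is roughly:
*
*	spin_lock_irqsave(&tags->lock, flags);
*	rq = tags->rqs[bitnr];
*	...
*	spin_unlock_irqrestore(&tags->lock, flags);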
3431 */ 3432 spin_lock_irqsave(&tags->lock, flags); 3433 spin_unlock_irqrestore(&tags->lock, flags); 3434 } 3435 3436 /* hctx->ctxs will be freed in queue's release handler */ 3437 static void blk_mq_exit_hctx(struct request_queue *q, 3438 struct blk_mq_tag_set *set, 3439 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3440 { 3441 struct request *flush_rq = hctx->fq->flush_rq; 3442 3443 if (blk_mq_hw_queue_mapped(hctx)) 3444 blk_mq_tag_idle(hctx); 3445 3446 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3447 set->queue_depth, flush_rq); 3448 if (set->ops->exit_request) 3449 set->ops->exit_request(set, flush_rq, hctx_idx); 3450 3451 if (set->ops->exit_hctx) 3452 set->ops->exit_hctx(hctx, hctx_idx); 3453 3454 blk_mq_remove_cpuhp(hctx); 3455 3456 xa_erase(&q->hctx_table, hctx_idx); 3457 3458 spin_lock(&q->unused_hctx_lock); 3459 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3460 spin_unlock(&q->unused_hctx_lock); 3461 } 3462 3463 static void blk_mq_exit_hw_queues(struct request_queue *q, 3464 struct blk_mq_tag_set *set, int nr_queue) 3465 { 3466 struct blk_mq_hw_ctx *hctx; 3467 unsigned long i; 3468 3469 queue_for_each_hw_ctx(q, hctx, i) { 3470 if (i == nr_queue) 3471 break; 3472 blk_mq_exit_hctx(q, set, hctx, i); 3473 } 3474 } 3475 3476 static int blk_mq_init_hctx(struct request_queue *q, 3477 struct blk_mq_tag_set *set, 3478 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3479 { 3480 hctx->queue_num = hctx_idx; 3481 3482 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3483 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3484 &hctx->cpuhp_online); 3485 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3486 3487 hctx->tags = set->tags[hctx_idx]; 3488 3489 if (set->ops->init_hctx && 3490 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3491 goto unregister_cpu_notifier; 3492 3493 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3494 hctx->numa_node)) 3495 goto exit_hctx; 3496 3497 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3498 goto exit_flush_rq; 3499 3500 return 0; 3501 3502 exit_flush_rq: 3503 if (set->ops->exit_request) 3504 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3505 exit_hctx: 3506 if (set->ops->exit_hctx) 3507 set->ops->exit_hctx(hctx, hctx_idx); 3508 unregister_cpu_notifier: 3509 blk_mq_remove_cpuhp(hctx); 3510 return -1; 3511 } 3512 3513 static struct blk_mq_hw_ctx * 3514 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3515 int node) 3516 { 3517 struct blk_mq_hw_ctx *hctx; 3518 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3519 3520 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3521 if (!hctx) 3522 goto fail_alloc_hctx; 3523 3524 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3525 goto free_hctx; 3526 3527 atomic_set(&hctx->nr_active, 0); 3528 if (node == NUMA_NO_NODE) 3529 node = set->numa_node; 3530 hctx->numa_node = node; 3531 3532 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3533 spin_lock_init(&hctx->lock); 3534 INIT_LIST_HEAD(&hctx->dispatch); 3535 hctx->queue = q; 3536 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3537 3538 INIT_LIST_HEAD(&hctx->hctx_list); 3539 3540 /* 3541 * Allocate space for all possible cpus to avoid allocation at 3542 * runtime 3543 */ 3544 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3545 gfp, node); 3546 if (!hctx->ctxs) 3547 goto free_cpumask; 3548 3549 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3550 gfp, node, false, false)) 3551 goto free_ctxs; 3552 
hctx->nr_ctx = 0; 3553 3554 spin_lock_init(&hctx->dispatch_wait_lock); 3555 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3556 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3557 3558 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3559 if (!hctx->fq) 3560 goto free_bitmap; 3561 3562 blk_mq_hctx_kobj_init(hctx); 3563 3564 return hctx; 3565 3566 free_bitmap: 3567 sbitmap_free(&hctx->ctx_map); 3568 free_ctxs: 3569 kfree(hctx->ctxs); 3570 free_cpumask: 3571 free_cpumask_var(hctx->cpumask); 3572 free_hctx: 3573 kfree(hctx); 3574 fail_alloc_hctx: 3575 return NULL; 3576 } 3577 3578 static void blk_mq_init_cpu_queues(struct request_queue *q, 3579 unsigned int nr_hw_queues) 3580 { 3581 struct blk_mq_tag_set *set = q->tag_set; 3582 unsigned int i, j; 3583 3584 for_each_possible_cpu(i) { 3585 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3586 struct blk_mq_hw_ctx *hctx; 3587 int k; 3588 3589 __ctx->cpu = i; 3590 spin_lock_init(&__ctx->lock); 3591 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3592 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3593 3594 __ctx->queue = q; 3595 3596 /* 3597 * Set local node, IFF we have more than one hw queue. If 3598 * not, we remain on the home node of the device 3599 */ 3600 for (j = 0; j < set->nr_maps; j++) { 3601 hctx = blk_mq_map_queue_type(q, j, i); 3602 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3603 hctx->numa_node = cpu_to_node(i); 3604 } 3605 } 3606 } 3607 3608 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3609 unsigned int hctx_idx, 3610 unsigned int depth) 3611 { 3612 struct blk_mq_tags *tags; 3613 int ret; 3614 3615 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3616 if (!tags) 3617 return NULL; 3618 3619 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3620 if (ret) { 3621 blk_mq_free_rq_map(tags); 3622 return NULL; 3623 } 3624 3625 return tags; 3626 } 3627 3628 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3629 int hctx_idx) 3630 { 3631 if (blk_mq_is_shared_tags(set->flags)) { 3632 set->tags[hctx_idx] = set->shared_tags; 3633 3634 return true; 3635 } 3636 3637 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3638 set->queue_depth); 3639 3640 return set->tags[hctx_idx]; 3641 } 3642 3643 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3644 struct blk_mq_tags *tags, 3645 unsigned int hctx_idx) 3646 { 3647 if (tags) { 3648 blk_mq_free_rqs(set, tags, hctx_idx); 3649 blk_mq_free_rq_map(tags); 3650 } 3651 } 3652 3653 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3654 unsigned int hctx_idx) 3655 { 3656 if (!blk_mq_is_shared_tags(set->flags)) 3657 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3658 3659 set->tags[hctx_idx] = NULL; 3660 } 3661 3662 static void blk_mq_map_swqueue(struct request_queue *q) 3663 { 3664 unsigned int j, hctx_idx; 3665 unsigned long i; 3666 struct blk_mq_hw_ctx *hctx; 3667 struct blk_mq_ctx *ctx; 3668 struct blk_mq_tag_set *set = q->tag_set; 3669 3670 queue_for_each_hw_ctx(q, hctx, i) { 3671 cpumask_clear(hctx->cpumask); 3672 hctx->nr_ctx = 0; 3673 hctx->dispatch_from = NULL; 3674 } 3675 3676 /* 3677 * Map software to hardware queues. 3678 * 3679 * If the cpu isn't present, the cpu is mapped to first hctx. 
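*
* As a worked example (numbers assumed): on a 4-CPU machine with two
* hardware queues the default mapping typically ends up as
*
*	mq_map[] = { 0, 0, 1, 1 }
*
* so ctx0/ctx1 feed hctx0 and ctx2/ctx3 feed hctx1.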
3680 */ 3681 for_each_possible_cpu(i) { 3682 3683 ctx = per_cpu_ptr(q->queue_ctx, i); 3684 for (j = 0; j < set->nr_maps; j++) { 3685 if (!set->map[j].nr_queues) { 3686 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3687 HCTX_TYPE_DEFAULT, i); 3688 continue; 3689 } 3690 hctx_idx = set->map[j].mq_map[i]; 3691 /* unmapped hw queue can be remapped after CPU topo changed */ 3692 if (!set->tags[hctx_idx] && 3693 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 3694 /* 3695 * If tags initialization fail for some hctx, 3696 * that hctx won't be brought online. In this 3697 * case, remap the current ctx to hctx[0] which 3698 * is guaranteed to always have tags allocated 3699 */ 3700 set->map[j].mq_map[i] = 0; 3701 } 3702 3703 hctx = blk_mq_map_queue_type(q, j, i); 3704 ctx->hctxs[j] = hctx; 3705 /* 3706 * If the CPU is already set in the mask, then we've 3707 * mapped this one already. This can happen if 3708 * devices share queues across queue maps. 3709 */ 3710 if (cpumask_test_cpu(i, hctx->cpumask)) 3711 continue; 3712 3713 cpumask_set_cpu(i, hctx->cpumask); 3714 hctx->type = j; 3715 ctx->index_hw[hctx->type] = hctx->nr_ctx; 3716 hctx->ctxs[hctx->nr_ctx++] = ctx; 3717 3718 /* 3719 * If the nr_ctx type overflows, we have exceeded the 3720 * amount of sw queues we can support. 3721 */ 3722 BUG_ON(!hctx->nr_ctx); 3723 } 3724 3725 for (; j < HCTX_MAX_TYPES; j++) 3726 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3727 HCTX_TYPE_DEFAULT, i); 3728 } 3729 3730 queue_for_each_hw_ctx(q, hctx, i) { 3731 /* 3732 * If no software queues are mapped to this hardware queue, 3733 * disable it and free the request entries. 3734 */ 3735 if (!hctx->nr_ctx) { 3736 /* Never unmap queue 0. We need it as a 3737 * fallback in case of a new remap fails 3738 * allocation 3739 */ 3740 if (i) 3741 __blk_mq_free_map_and_rqs(set, i); 3742 3743 hctx->tags = NULL; 3744 continue; 3745 } 3746 3747 hctx->tags = set->tags[i]; 3748 WARN_ON(!hctx->tags); 3749 3750 /* 3751 * Set the map size to the number of mapped software queues. 3752 * This is more accurate and more efficient than looping 3753 * over all possibly mapped software queues. 3754 */ 3755 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 3756 3757 /* 3758 * Initialize batch roundrobin counts 3759 */ 3760 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 3761 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 3762 } 3763 } 3764 3765 /* 3766 * Caller needs to ensure that we're either frozen/quiesced, or that 3767 * the queue isn't live yet. 
3768 */ 3769 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3770 { 3771 struct blk_mq_hw_ctx *hctx; 3772 unsigned long i; 3773 3774 queue_for_each_hw_ctx(q, hctx, i) { 3775 if (shared) { 3776 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3777 } else { 3778 blk_mq_tag_idle(hctx); 3779 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3780 } 3781 } 3782 } 3783 3784 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3785 bool shared) 3786 { 3787 struct request_queue *q; 3788 3789 lockdep_assert_held(&set->tag_list_lock); 3790 3791 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3792 blk_mq_freeze_queue(q); 3793 queue_set_hctx_shared(q, shared); 3794 blk_mq_unfreeze_queue(q); 3795 } 3796 } 3797 3798 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3799 { 3800 struct blk_mq_tag_set *set = q->tag_set; 3801 3802 mutex_lock(&set->tag_list_lock); 3803 list_del(&q->tag_set_list); 3804 if (list_is_singular(&set->tag_list)) { 3805 /* just transitioned to unshared */ 3806 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3807 /* update existing queue */ 3808 blk_mq_update_tag_set_shared(set, false); 3809 } 3810 mutex_unlock(&set->tag_list_lock); 3811 INIT_LIST_HEAD(&q->tag_set_list); 3812 } 3813 3814 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3815 struct request_queue *q) 3816 { 3817 mutex_lock(&set->tag_list_lock); 3818 3819 /* 3820 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3821 */ 3822 if (!list_empty(&set->tag_list) && 3823 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3824 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3825 /* update existing queue */ 3826 blk_mq_update_tag_set_shared(set, true); 3827 } 3828 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3829 queue_set_hctx_shared(q, true); 3830 list_add_tail(&q->tag_set_list, &set->tag_list); 3831 3832 mutex_unlock(&set->tag_list_lock); 3833 } 3834 3835 /* All allocations will be freed in release handler of q->mq_kobj */ 3836 static int blk_mq_alloc_ctxs(struct request_queue *q) 3837 { 3838 struct blk_mq_ctxs *ctxs; 3839 int cpu; 3840 3841 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 3842 if (!ctxs) 3843 return -ENOMEM; 3844 3845 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 3846 if (!ctxs->queue_ctx) 3847 goto fail; 3848 3849 for_each_possible_cpu(cpu) { 3850 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 3851 ctx->ctxs = ctxs; 3852 } 3853 3854 q->mq_kobj = &ctxs->kobj; 3855 q->queue_ctx = ctxs->queue_ctx; 3856 3857 return 0; 3858 fail: 3859 kfree(ctxs); 3860 return -ENOMEM; 3861 } 3862 3863 /* 3864 * It is the actual release handler for mq, but we do it from 3865 * request queue's release handler for avoiding use-after-free 3866 * and headache because q->mq_kobj shouldn't have been introduced, 3867 * but we can't group ctx/kctx kobj without it. 3868 */ 3869 void blk_mq_release(struct request_queue *q) 3870 { 3871 struct blk_mq_hw_ctx *hctx, *next; 3872 unsigned long i; 3873 3874 queue_for_each_hw_ctx(q, hctx, i) 3875 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 3876 3877 /* all hctx are in .unused_hctx_list now */ 3878 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 3879 list_del_init(&hctx->hctx_list); 3880 kobject_put(&hctx->kobj); 3881 } 3882 3883 xa_destroy(&q->hctx_table); 3884 3885 /* 3886 * release .mq_kobj and sw queue's kobject now because 3887 * both share lifetime with request queue. 
3888 */ 3889 blk_mq_sysfs_deinit(q); 3890 } 3891 3892 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 3893 void *queuedata) 3894 { 3895 struct request_queue *q; 3896 int ret; 3897 3898 q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING); 3899 if (!q) 3900 return ERR_PTR(-ENOMEM); 3901 q->queuedata = queuedata; 3902 ret = blk_mq_init_allocated_queue(set, q); 3903 if (ret) { 3904 blk_cleanup_queue(q); 3905 return ERR_PTR(ret); 3906 } 3907 return q; 3908 } 3909 3910 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 3911 { 3912 return blk_mq_init_queue_data(set, NULL); 3913 } 3914 EXPORT_SYMBOL(blk_mq_init_queue); 3915 3916 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, 3917 struct lock_class_key *lkclass) 3918 { 3919 struct request_queue *q; 3920 struct gendisk *disk; 3921 3922 q = blk_mq_init_queue_data(set, queuedata); 3923 if (IS_ERR(q)) 3924 return ERR_CAST(q); 3925 3926 disk = __alloc_disk_node(q, set->numa_node, lkclass); 3927 if (!disk) { 3928 blk_cleanup_queue(q); 3929 return ERR_PTR(-ENOMEM); 3930 } 3931 return disk; 3932 } 3933 EXPORT_SYMBOL(__blk_mq_alloc_disk); 3934 3935 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 3936 struct blk_mq_tag_set *set, struct request_queue *q, 3937 int hctx_idx, int node) 3938 { 3939 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 3940 3941 /* reuse dead hctx first */ 3942 spin_lock(&q->unused_hctx_lock); 3943 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 3944 if (tmp->numa_node == node) { 3945 hctx = tmp; 3946 break; 3947 } 3948 } 3949 if (hctx) 3950 list_del_init(&hctx->hctx_list); 3951 spin_unlock(&q->unused_hctx_lock); 3952 3953 if (!hctx) 3954 hctx = blk_mq_alloc_hctx(q, set, node); 3955 if (!hctx) 3956 goto fail; 3957 3958 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 3959 goto free_hctx; 3960 3961 return hctx; 3962 3963 free_hctx: 3964 kobject_put(&hctx->kobj); 3965 fail: 3966 return NULL; 3967 } 3968 3969 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 3970 struct request_queue *q) 3971 { 3972 struct blk_mq_hw_ctx *hctx; 3973 unsigned long i, j; 3974 3975 /* protect against switching io scheduler */ 3976 mutex_lock(&q->sysfs_lock); 3977 for (i = 0; i < set->nr_hw_queues; i++) { 3978 int old_node; 3979 int node = blk_mq_get_hctx_node(set, i); 3980 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); 3981 3982 if (old_hctx) { 3983 old_node = old_hctx->numa_node; 3984 blk_mq_exit_hctx(q, set, old_hctx, i); 3985 } 3986 3987 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { 3988 if (!old_hctx) 3989 break; 3990 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", 3991 node, old_node); 3992 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); 3993 WARN_ON_ONCE(!hctx); 3994 } 3995 } 3996 /* 3997 * Increasing nr_hw_queues fails. Free the newly allocated 3998 * hctxs and keep the previous q->nr_hw_queues. 
3999 */ 4000 if (i != set->nr_hw_queues) { 4001 j = q->nr_hw_queues; 4002 } else { 4003 j = i; 4004 q->nr_hw_queues = set->nr_hw_queues; 4005 } 4006 4007 xa_for_each_start(&q->hctx_table, j, hctx, j) 4008 blk_mq_exit_hctx(q, set, hctx, j); 4009 mutex_unlock(&q->sysfs_lock); 4010 } 4011 4012 static void blk_mq_update_poll_flag(struct request_queue *q) 4013 { 4014 struct blk_mq_tag_set *set = q->tag_set; 4015 4016 if (set->nr_maps > HCTX_TYPE_POLL && 4017 set->map[HCTX_TYPE_POLL].nr_queues) 4018 blk_queue_flag_set(QUEUE_FLAG_POLL, q); 4019 else 4020 blk_queue_flag_clear(QUEUE_FLAG_POLL, q); 4021 } 4022 4023 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 4024 struct request_queue *q) 4025 { 4026 WARN_ON_ONCE(blk_queue_has_srcu(q) != 4027 !!(set->flags & BLK_MQ_F_BLOCKING)); 4028 4029 /* mark the queue as mq asap */ 4030 q->mq_ops = set->ops; 4031 4032 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, 4033 blk_mq_poll_stats_bkt, 4034 BLK_MQ_POLL_STATS_BKTS, q); 4035 if (!q->poll_cb) 4036 goto err_exit; 4037 4038 if (blk_mq_alloc_ctxs(q)) 4039 goto err_poll; 4040 4041 /* init q->mq_kobj and sw queues' kobjects */ 4042 blk_mq_sysfs_init(q); 4043 4044 INIT_LIST_HEAD(&q->unused_hctx_list); 4045 spin_lock_init(&q->unused_hctx_lock); 4046 4047 xa_init(&q->hctx_table); 4048 4049 blk_mq_realloc_hw_ctxs(set, q); 4050 if (!q->nr_hw_queues) 4051 goto err_hctxs; 4052 4053 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 4054 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 4055 4056 q->tag_set = set; 4057 4058 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 4059 blk_mq_update_poll_flag(q); 4060 4061 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 4062 INIT_LIST_HEAD(&q->requeue_list); 4063 spin_lock_init(&q->requeue_lock); 4064 4065 q->nr_requests = set->queue_depth; 4066 4067 /* 4068 * Default to classic polling 4069 */ 4070 q->poll_nsec = BLK_MQ_POLL_CLASSIC; 4071 4072 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 4073 blk_mq_add_queue_tag_set(set, q); 4074 blk_mq_map_swqueue(q); 4075 return 0; 4076 4077 err_hctxs: 4078 xa_destroy(&q->hctx_table); 4079 q->nr_hw_queues = 0; 4080 blk_mq_sysfs_deinit(q); 4081 err_poll: 4082 blk_stat_free_callback(q->poll_cb); 4083 q->poll_cb = NULL; 4084 err_exit: 4085 q->mq_ops = NULL; 4086 return -ENOMEM; 4087 } 4088 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 4089 4090 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 4091 void blk_mq_exit_queue(struct request_queue *q) 4092 { 4093 struct blk_mq_tag_set *set = q->tag_set; 4094 4095 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ 4096 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 4097 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. 
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q)
{
	WARN_ON_ONCE(blk_queue_has_srcu(q) !=
			!!(set->flags & BLK_MQ_F_BLOCKING));

	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	if (blk_mq_alloc_ctxs(q))
		goto err_poll;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	INIT_LIST_HEAD(&q->unused_hctx_list);
	spin_lock_init(&q->unused_hctx_lock);

	xa_init(&q->hctx_table);

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->tag_set = set;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
	blk_mq_update_poll_flag(q);

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = BLK_MQ_POLL_CLASSIC;

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);
	return 0;

err_hctxs:
	xa_destroy(&q->hctx_table);
	q->nr_hw_queues = 0;
	blk_mq_sysfs_deinit(q);
err_poll:
	blk_stat_free_callback(q->poll_cb);
	q->poll_cb = NULL;
err_exit:
	q->mq_ops = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
	blk_mq_del_queue_tag_set(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	if (blk_mq_is_shared_tags(set->flags)) {
		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						set->queue_depth);
		if (!set->shared_tags)
			return -ENOMEM;
	}

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (!__blk_mq_alloc_map_and_rqs(set, i))
			goto out_unwind;
		cond_resched();
	}

	return 0;

out_unwind:
	while (--i >= 0)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}
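
/*
 * Worked example of the retry loop above (numbers are purely illustrative):
 * a request for queue_depth = 1024 under memory pressure retries with 512,
 * 256, ... and only gives up once the depth would drop below
 * set->reserved_tags + BLK_MQ_TAG_MIN.  On success, set->queue_depth holds
 * whatever depth was actually allocated.
 */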
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	/*
	 * blk_mq_map_queues() and multiple .map_queues() implementations
	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
	 * number of hardware queues.
	 */
	if (set->nr_maps == 1)
		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

	if (set->ops->map_queues && !is_kdump_kernel()) {
		int i;

		/*
		 * transport .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 *	mask = get_cpu_mask(queue)
		 *	for_each_cpu(cpu, mask)
		 *		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared to
		 * kill stale mappings, since one CPU may not be mapped to
		 * any hw queue.
		 */
		for (i = 0; i < set->nr_maps; i++)
			blk_mq_clear_mq_map(&set->map[i]);

		return set->ops->map_queues(set);
	} else {
		BUG_ON(set->nr_maps > 1);
		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	}
}
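
/*
 * A minimal sketch of a driver-provided .map_queues implementation of the
 * kind referenced above, assuming a PCI device with one vector per hardware
 * queue (mydrv and its pdev member are hypothetical):
 *
 *	static int mydrv_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct mydrv *mydrv = set->driver_data;
 *
 *		blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
 *				      mydrv->pdev, 0);
 *		return 0;
 *	}
 *
 * Drivers without special affinity requirements can simply leave
 * .map_queues unset and let blk_mq_map_queues() do the spreading.
 */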
static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
				       int cur_nr_hw_queues, int new_nr_hw_queues)
{
	struct blk_mq_tags **new_tags;

	if (cur_nr_hw_queues >= new_nr_hw_queues)
		return 0;

	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
				GFP_KERNEL, set->numa_node);
	if (!new_tags)
		return -ENOMEM;

	if (set->tags)
		memcpy(new_tags, set->tags, cur_nr_hw_queues *
		       sizeof(*set->tags));
	kfree(set->tags);
	set->tags = new_tags;
	set->nr_hw_queues = new_nr_hw_queues;

	return 0;
}

static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
				int new_nr_hw_queues)
{
	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int i, ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	if (!set->nr_maps)
		set->nr_maps = 1;
	else if (set->nr_maps > HCTX_MAX_TYPES)
		return -EINVAL;

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->nr_maps = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map
	 */
	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
		return -ENOMEM;

	ret = -ENOMEM;
	for (i = 0; i < set->nr_maps; i++) {
		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
						  sizeof(set->map[i].mq_map[0]),
						  GFP_KERNEL, set->numa_node);
		if (!set->map[i].mq_map)
			goto out_free_mq_map;
		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
	}

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_set_map_and_rqs(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	for (i = 0; i < set->nr_maps; i++) {
		kfree(set->map[i].mq_map);
		set->map[i].mq_map = NULL;
	}
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

/* allocate and initialize a tagset for a simple single-queue device */
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->nr_maps = 1;
	set->queue_depth = queue_depth;
	set->numa_node = NUMA_NO_NODE;
	set->flags = set_flags;
	return blk_mq_alloc_tag_set(set);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i, j;

	for (i = 0; i < set->nr_hw_queues; i++)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	for (j = 0; j < set->nr_maps; j++) {
		kfree(set->map[j].mq_map);
		set->map[j].mq_map = NULL;
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
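
/*
 * Typical tag_set life cycle from a driver's point of view (a hedged
 * sketch; the mydrv_* names are made up).  At probe time:
 *
 *	memset(&mydrv->tag_set, 0, sizeof(mydrv->tag_set));
 *	mydrv->tag_set.ops = &mydrv_mq_ops;
 *	mydrv->tag_set.nr_hw_queues = nr_queues;
 *	mydrv->tag_set.queue_depth = 128;
 *	mydrv->tag_set.numa_node = NUMA_NO_NODE;
 *	mydrv->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(&mydrv->tag_set);
 *	if (ret)
 *		return ret;
 *
 * and at remove time, after all disks and queues created from the set are
 * gone, blk_mq_free_tag_set(&mydrv->tag_set) releases the maps and tags.
 * Single-queue drivers can use blk_mq_alloc_sq_tag_set() above instead of
 * filling in the structure by hand.
 */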
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int ret;
	unsigned long i;

	if (!set)
		return -EINVAL;

	if (q->nr_requests == nr)
		return 0;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		}
		if (ret)
			break;
		if (q->elevator && q->elevator->type->ops.depth_updated)
			q->elevator->type->ops.depth_updated(hctx);
	}
	if (!ret) {
		q->nr_requests = nr;
		if (blk_mq_is_shared_tags(set->flags)) {
			if (q->elevator)
				blk_mq_tag_update_sched_shared_tags(q);
			else
				blk_mq_tag_resize_shared_tags(set, nr);
		}
	}

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in qe pair list and switch the
 * io scheduler to 'none'
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release. The reference to the io scheduler
	 * module obtained by elevator_get will also be put. So we need to
	 * take a reference on the io scheduler module here to prevent it
	 * from being removed.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
						struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	list_for_each_entry(qe, head, node)
		if (qe->q == q)
			return qe;

	return NULL;
}

static void blk_mq_elv_switch_back(struct list_head *head,
				  struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t;

	qe = blk_lookup_qe_pair(head, q);
	if (!qe)
		return;
	t = qe->type;
	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}
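
/*
 * Worked example of the two helpers above: if a queue is running
 * mq-deadline when __blk_mq_update_nr_hw_queues() starts,
 * blk_mq_elv_switch_none() records the elevator_type in the caller's qe
 * list, takes a module reference and switches the queue to 'none'; once
 * the hardware contexts have been reallocated, blk_mq_elv_switch_back()
 * finds the entry again and switches the queue back to mq-deadline.
 */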
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_update_poll_flag(q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			int i = prev_nr_hw_queues;

			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				__blk_mq_free_map_and_rqs(set, i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
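
/*
 * Illustrative caller (not from this file): a driver whose controller came
 * back from reset with a different number of usable hardware queues would
 * typically call
 *
 *	blk_mq_update_nr_hw_queues(&mydrv->tag_set, new_nr_queues);
 *
 * where mydrv is hypothetical.  All request queues that share the tag_set
 * are frozen for the duration of the update.
 */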
/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (q->poll_stat)
		return true;

	return blk_stats_alloc_enable(q);
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
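
/*
 * Example of the estimate above (numbers invented for illustration): if the
 * poll stats for this request's bucket show a mean completion time of
 * 8000ns, blk_mq_poll_nsecs() returns (8000 + 1) / 2 = 4000, so hybrid
 * polling sleeps for roughly 4us before starting to busy-poll.
 */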
static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
	struct request *rq = blk_qc_to_rq(hctx, qc);
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	/*
	 * If a request has completed on a queue that uses an I/O scheduler,
	 * we won't get back a request from blk_qc_to_rq.
	 */
	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);

	/*
	 * If we sleep, have the caller restart the poll loop to reset the
	 * state. Like for the other success return cases, the caller is
	 * responsible for checking if the IO completed. If the IO isn't
	 * complete, we'll get called again and will go straight to the busy
	 * poll loop.
	 */
	return true;
}

static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (task_is_running(current))
			return 1;

		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}

static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);
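
/*
 * Note on how the polling paths above are reached (a summary; the details
 * live outside this file): polled I/O, such as io_uring submissions with
 * IORING_SETUP_IOPOLL on a queue with poll queues, ends up in blk_mq_poll()
 * via bio_poll().  The per-queue sysfs attribute "io_poll_delay" selects
 * the mode used by blk_mq_poll(): -1 selects classic busy polling
 * (BLK_MQ_POLL_CLASSIC), 0 lets hybrid polling sleep for half of the
 * estimated mean completion time, and a positive value is used as a fixed
 * pre-poll sleep time in nanoseconds.
 */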