// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>
#include <linux/sched/isolation.h>

#include <trace/events/block.h>

#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
static DEFINE_MUTEX(blk_mq_cpuhp_lock);

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
		blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			 struct io_comp_batch *iob, unsigned int flags);

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
	struct mq_inflight *mi = priv;

	if (rq->rq_flags & RQF_IO_STAT &&
	    (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

#ifdef CONFIG_LOCKDEP
static bool blk_freeze_set_owner(struct request_queue *q,
				 struct task_struct *owner)
{
	if (!owner)
		return false;

	if (!q->mq_freeze_depth) {
		q->mq_freeze_owner = owner;
		q->mq_freeze_owner_depth = 1;
		q->mq_freeze_disk_dead = !q->disk ||
			test_bit(GD_DEAD, &q->disk->state) ||
			!blk_queue_registered(q);
		q->mq_freeze_queue_dying = blk_queue_dying(q);
		return true;
	}

	if (owner == q->mq_freeze_owner)
		q->mq_freeze_owner_depth += 1;
	return false;
}

/* verify the last unfreeze in owner context */
static bool blk_unfreeze_check_owner(struct request_queue *q)
{
	if (q->mq_freeze_owner != current)
		return false;
	if (--q->mq_freeze_owner_depth == 0) {
		q->mq_freeze_owner = NULL;
		return true;
	}
	return false;
}

#else

static bool blk_freeze_set_owner(struct request_queue *q,
				 struct task_struct *owner)
{
	return false;
}

static bool blk_unfreeze_check_owner(struct request_queue *q)
{
	return false;
}
#endif

bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner)
{
	bool freeze;

	mutex_lock(&q->mq_freeze_lock);
	freeze = blk_freeze_set_owner(q, owner);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}

	return freeze;
}

void blk_freeze_queue_start(struct request_queue *q)
{
	if (__blk_freeze_queue_start(q, current))
		blk_freeze_acquire_lock(q);
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
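
/*
 * Usage sketch (illustrative only): a caller that must drain all I/O
 * before changing queue state pairs the freeze and unfreeze helpers
 * defined below, e.g.:
 *
 *	blk_mq_freeze_queue(q);		// kill q_usage_counter, wait for zero
 *	// ... update limits / tear down resources ...
 *	blk_mq_unfreeze_queue(q);	// resurrect the counter, wake waiters
 *
 * Freezes nest: the queue only becomes usable again once mq_freeze_depth
 * drops back to zero.
 */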

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	bool unfreeze;

	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	unfreeze = blk_unfreeze_check_owner(q);
	mutex_unlock(&q->mq_freeze_lock);

	return unfreeze;
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	if (__blk_mq_unfreeze_queue(q, false))
		blk_unfreeze_release_lock(q);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * non_owner variant of blk_freeze_queue_start
 *
 * Unlike blk_freeze_queue_start, the queue doesn't need to be unfrozen
 * by the same task. This is fragile and should not be used if at all
 * possible.
 */
void blk_freeze_queue_start_non_owner(struct request_queue *q)
{
	__blk_freeze_queue_start(q, NULL);
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start_non_owner);

/* non_owner variant of blk_mq_unfreeze_queue */
void blk_mq_unfreeze_queue_non_owner(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_non_owner);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @set: tag_set to wait on
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set. This
 * function only waits for the quiesce on those request_queues that had
 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
	if (set->flags & BLK_MQ_F_BLOCKING)
		synchronize_srcu(set->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	/* nothing to wait for non-mq queues */
	if (queue_is_mq(q))
		blk_mq_wait_quiesce_done(q->tag_set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before it was
 * quiesced by blk_mq_quiesce_queue().
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_quiesce_queue_nowait(q);
	}
	mutex_unlock(&set->tag_list_lock);

	blk_mq_wait_quiesce_done(set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);

void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_unquiesce_queue(q);
	}
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = blk_time_get_ns();
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

/* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	if (blk_queue_rq_alloc_time(rq->q))
		rq->alloc_time_ns = alloc_time_ns;
	else
		rq->alloc_time_ns = 0;
#endif
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	rq->rq_flags = data->rq_flags;

	if (data->rq_flags & RQF_SCHED_TAGS) {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}
	rq->timeout = 0;

	rq->part = NULL;
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
	rq->nr_integrity_segments = 0;
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (e->type->ops.prepare_request)
			e->type->ops.prepare_request(rq);
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag);
		rq_list_add_head(data->cached_rqs, rq);
		nr++;
	}
	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_add_active_requests(data->hctx, nr);
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rqs);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = blk_time_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);

	if (q->elevator) {
		/*
		 * All requests use scheduler tags when an I/O scheduler is
		 * enabled for the queue.
		 */
		data->rq_flags |= RQF_SCHED_TAGS;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list.
		 */
		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
		    !blk_op_is_passthrough(data->cmd_flags)) {
			struct elevator_mq_ops *ops = &q->elevator->type->ops;

			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);

			data->rq_flags |= RQF_USE_SCHED;
			if (ops->limit_depth)
				ops->limit_depth(data->cmd_flags, data);
		}
	} else {
		blk_mq_tag_busy(data->hctx);
	}

	if (data->flags & BLK_MQ_REQ_RESERVED)
		data->rq_flags |= RQF_RESV;

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data);
		if (rq) {
			blk_mq_rq_time_init(rq, alloc_time_ns);
			return rq;
		}
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_inc_active_requests(data->hctx);
	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	return rq;
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
					    struct blk_plug *plug,
					    blk_opf_t opf,
					    blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= plug->nr_ios,
		.cached_rqs	= &plug->cached_rqs,
	};
	struct request *rq;

	if (blk_queue_enter(q, flags))
		return NULL;

	plug->nr_ios = 1;

	rq = __blk_mq_alloc_requests(&data);
	if (unlikely(!rq))
		blk_queue_exit(q);
	return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;

	if (rq_list_empty(&plug->cached_rqs)) {
		if (plug->nr_ios == 1)
			return NULL;
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (!rq)
			return NULL;
	} else {
		rq = rq_list_peek(&plug->cached_rqs);
		if (!rq || rq->q != q)
			return NULL;

		if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
			return NULL;
		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
			return NULL;

		rq_list_pop(&plug->cached_rqs);
		blk_mq_rq_time_init(rq, blk_time_get_ns());
	}

	rq->cmd_flags = opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_cached_request(q, opf, flags);
	if (!rq) {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.flags		= flags,
			.cmd_flags	= opf,
			.nr_tags	= 1,
		};
		int ret;

		ret = blk_queue_enter(q, flags);
		if (ret)
			return ERR_PTR(ret);

		rq = __blk_mq_alloc_requests(&data);
		if (!rq)
			goto out_queue_exit;
	}
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = blk_time_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not, tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		goto out_queue_exit;
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (q->elevator)
		data.rq_flags |= RQF_SCHED_TAGS;
	else
		blk_mq_tag_busy(data.hctx);

	if (flags & BLK_MQ_REQ_RESERVED)
		data.rq_flags |= RQF_RESV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	if (!(data.rq_flags & RQF_SCHED_TAGS))
		blk_mq_inc_active_requests(data.hctx);
	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void blk_mq_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_zone_finish_request(rq);

	if (rq->rq_flags & RQF_USE_SCHED) {
		q->elevator->type->ops.finish_request(rq);
		/*
		 * For postflush requests that may need to be
		 * completed twice, we should clear this flag
		 * to avoid double finish_request() on the rq.
		 */
		rq->rq_flags &= ~RQF_USE_SCHED;
	}
}

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;

	if (rq->tag != BLK_MQ_NO_TAG) {
		blk_mq_dec_active_requests(hctx);
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	}
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_finish_request(rq);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(__force unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
		(unsigned long long)blk_rq_pos(rq),
		blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
		rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->rq_flags & RQF_IO_STAT) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), (__force u32)req_op(req),
		blk_op_str(req_op(req)),
		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req_get_ioprio(req)));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		blk_integrity_complete(req, total_bytes);

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	blk_crypto_rq_put_keyslot(req);

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		blk_zone_update_request_bio(req, bio);

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	if (!req->end_io) {
		req->bio = NULL;
		req->__data_len = 0;
	}
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *	except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	bool is_flush = req->rq_flags & RQF_FLUSH_SEQ;
	bool quiet = req->rq_flags & RQF_QUIET;
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		blk_integrity_complete(req, nr_bytes);

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
		__blk_crypto_rq_put_keyslot(req);

	if (unlikely(error && !blk_rq_is_passthrough(req) && !quiet) &&
	    !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (unlikely(error))
			bio->bi_status = error;

		if (bio_bytes == bio->bi_iter.bi_size) {
			req->bio = bio->bi_next;
		} else if (bio_is_zone_append(bio) && error == BLK_STS_OK) {
			/*
			 * Partial zone append completions cannot be supported
			 * as the BIO fragments may end up not being written
			 * sequentially.
			 */
			bio->bi_status = BLK_STS_IOERR;
		}

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		if (unlikely(quiet))
			bio_set_flag(bio, BIO_QUIET);

		bio_advance(bio, bio_bytes);

		/* Don't actually finish bio if it's part of flush sequence */
		if (!bio->bi_iter.bi_size) {
			blk_zone_update_request_bio(req, bio);
			if (!is_flush)
				bio_endio(bio);
		}

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static inline void blk_account_io_done(struct request *req, u64 now)
{
	trace_block_io_done(req);

	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if ((req->rq_flags & (RQF_IO_STAT|RQF_FLUSH_SEQ)) == RQF_IO_STAT) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_local_dec(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

static inline bool blk_rq_passthrough_stats(struct request *req)
{
	struct bio *bio = req->bio;

	if (!blk_queue_passthrough_stat(req->q))
		return false;

	/* Requests without a bio do not transfer data. */
	if (!bio)
		return false;

	/*
	 * Stats are accumulated in the bdev, so must have one attached to a
	 * bio to track stats. Most drivers do not set the bdev for passthrough
	 * requests, but nvme is one that will set it.
	 */
	if (!bio->bi_bdev)
		return false;

	/*
	 * We don't know what a passthrough command does, but we know the
	 * payload size and data direction. Ensuring the size is aligned to the
	 * block size filters out most commands with payloads that don't
	 * represent sector access.
	 */
	if (blk_rq_bytes(req) & (bdev_logical_block_size(bio->bi_bdev) - 1))
		return false;
	return true;
}

static inline void blk_account_io_start(struct request *req)
{
	trace_block_io_start(req);

	if (!blk_queue_io_stat(req->q))
		return;
	if (blk_rq_is_passthrough(req) && !blk_rq_passthrough_stats(req))
		return;

	req->rq_flags |= RQF_IO_STAT;
	req->start_time_ns = blk_time_get_ns();

	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (req->bio)
		req->part = req->bio->bi_bdev;
	else
		req->part = req->q->disk->part0;

	part_stat_lock();
	update_io_ticks(req->part, jiffies, false);
	part_stat_local_inc(req->part, in_flight[op_is_write(req_op(req))]);
	part_stat_unlock();
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS)
		blk_stat_add(rq, now);

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, blk_time_get_ns());

	blk_mq_finish_request(rq);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
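
/*
 * Completion flow, sketched for illustration only: a driver usually calls
 * blk_mq_complete_request() from its IRQ path, which (directly or via
 * IPI/softirq, see blk_mq_complete_request_remote() below) invokes the
 * driver's ->complete() callback, and that callback ends the request:
 *
 *	static void mydrv_complete(struct request *rq)	// hypothetical hook
 *	{
 *		blk_mq_end_request(rq, mydrv_to_blk_status(rq));
 *	}
 *
 * Drivers that reap many completions at once can instead collect them in a
 * struct io_comp_batch and finish them with blk_mq_end_request_batch()
 * (defined below).
 */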

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = blk_time_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		blk_mq_finish_request(rq);

		rq_qos_done(rq->q, rq);

		/*
		 * If end_io handler returns NONE, then it still has
		 * ownership of the request.
		 */
		if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
			continue;

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(void)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain and capacity?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
	     cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
		smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a request whose hctx has only one ctx mapping, or a polled
	 * request, always complete it locally; it is pointless to redirect
	 * the completion.
	 */
	if ((rq->mq_hctx->nr_ctx == 1 &&
	     rq->mq_ctx->cpu == raw_smp_processor_id()) ||
	     rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initializations such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
	    !blk_rq_is_passthrough(rq)) {
		rq->io_start_time_ns = blk_time_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
	rq->mq_hctx->tags->rqs[rq->tag] = rq;

	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		blk_integrity_prepare(rq);

	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		last = NULL;
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	/*
	 * Any request allocated from sched tags can't be issued to
	 * ->queue_rqs() directly
	 */
	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
		plug->has_elevator = true;
	rq_list_add_tail(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	blk_account_io_start(rq);

	if (current->plug && !at_head) {
		blk_add_rq_to_plug(current->plug, rq);
		return;
	}

	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
	struct blk_rq_wait *wait = rq->end_io_data;

	wait->ret = ret;
	complete(&wait->done);
	return RQ_END_IO_NONE;
}

bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_rq_wait wait = {
		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
	};

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;

	blk_account_io_start(rq);
	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
	blk_mq_run_hw_queue(hctx, false);

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait.done);
	else
		blk_wait_io(&wait.done);

	return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);
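
/*
 * Illustrative usage (not taken from this file): a caller issuing a
 * synchronous passthrough command typically allocates, executes and frees
 * the request as below, with the driver-specific payload setup elided:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... fill in the driver payload for rq here ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *
 * blk_execute_rq_nowait() is the asynchronous variant: the caller sets
 * rq->end_io beforehand and handles freeing there (or returns
 * RQ_END_IO_FREE to let the block layer free the request).
 */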

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_add_tail(&rq->queuelist, &q->requeue_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	LIST_HEAD(flush_list);
	struct request *rq;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	list_splice_init(&q->flush_list, &flush_list);
	spin_unlock_irq(&q->requeue_lock);

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, the request has been started by the
		 * driver already and might have driver-specific data allocated
		 * already.  Insert it into the hctx dispatch list to avoid
		 * block layer merges for the request.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, 0);
		else
			blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
	}

	while (!list_empty(&flush_list)) {
		rq = list_entry(flush_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, 0);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_is_flush_data_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
}

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 *
	 * In case of queue quiesce, if one flush data request is completed,
	 * don't count it as inflight given the flush sequence is suspended,
	 * and the original flush data request is invisible to the driver,
	 * just like other pending requests because of quiesce.
	 */
	if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
				blk_is_flush_data_rq(rq) &&
				blk_mq_request_completed(rq))) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

struct blk_expired_data {
	bool has_timedout_rq;
	unsigned long next;
	unsigned long timeout_start;
};

static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(expired->timeout_start, deadline))
		return true;

	if (expired->next == 0)
		expired->next = deadline;
	else if (time_after(expired->next, deadline))
		expired->next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq)) {
		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else if (req_ref_put_and_test(rq)) {
		__blk_mq_free_request(rq);
	}
}

static bool blk_mq_check_expired(struct request *rq, void *priv)
{
	struct blk_expired_data *expired = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, expired)) {
		expired->has_timedout_rq = true;
		return false;
	}
	return true;
}

static bool blk_mq_handle_expired(struct request *rq, void *priv)
{
	struct blk_expired_data *expired = priv;

	if (blk_mq_req_expired(rq, expired))
		blk_mq_rq_timed_out(rq);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_expired_data expired = {
		.timeout_start = jiffies,
	};
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	/* check if there is any timed-out request */
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
	if (expired.has_timedout_rq) {
		/*
		 * Before walking tags, we must ensure any submit started
		 * before the current time has finished. Since the submit
		 * uses srcu or rcu, wait for a synchronization point to
		 * ensure all running submits have finished.
		 */
		blk_mq_wait_quiesce_done(q->tag_set);

		expired.next = 0;
		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
	}

	if (expired.next != 0) {
		mod_timer(&q->timeout, expired.next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	blk_mq_inc_active_requests(rq->mq_hctx);
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
	    !(blk_mq_is_shared_tags(hctx->flags))) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
		sbq = &hctx->tags->breserved_tags;
	else
		sbq = &hctx->tags->bitmap_tags;
	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * Add one explicit barrier since blk_mq_get_driver_tag() may
	 * not imply barrier in case of failure.
	 *
	 * Order adding us to wait queue and allocating driver tag.
	 *
	 * The pair is the one implied in sbitmap_queue_wake_up() which
	 * orders clearing sbitmap tag bits and waitqueue_active() in
	 * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless.
	 *
	 * Otherwise, re-order of adding wait queue and getting driver tag
	 * may cause __sbitmap_queue_wake_up() to wake up nothing because
	 * the waitqueue_active() may not observe us in wait queue.
	 */
	smp_mb();

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - the weight (7/8 and 1/8) is applied so that it can decrease exponentially
 * - take 4 as the factor to avoid getting a too-small (zero) result, and this
 *   factor doesn't matter because EWMA decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
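
/*
 * Worked example (illustrative): with weight 8 and factor 4 the update above
 * computes ewma = (ewma * 7 + (busy ? 16 : 0)) / 8 in integer arithmetic.
 * Starting from 0, repeated busy updates yield 2, 3, 4, 5, ..., while
 * repeated idle updates shrink the value by about 1/8 per call until it
 * reaches 0 again.  A non-zero hctx->dispatch_busy therefore indicates that
 * recent dispatches have been seeing a busy device.
 */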
2033 */ 2034 if (!blk_mq_mark_tag_wait(hctx, rq)) { 2035 /* 2036 * All budgets not got from this function will be put 2037 * together during handling partial dispatch 2038 */ 2039 if (need_budget) 2040 blk_mq_put_dispatch_budget(rq->q, budget_token); 2041 return PREP_DISPATCH_NO_TAG; 2042 } 2043 } 2044 2045 return PREP_DISPATCH_OK; 2046 } 2047 2048 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */ 2049 static void blk_mq_release_budgets(struct request_queue *q, 2050 struct list_head *list) 2051 { 2052 struct request *rq; 2053 2054 list_for_each_entry(rq, list, queuelist) { 2055 int budget_token = blk_mq_get_rq_budget_token(rq); 2056 2057 if (budget_token >= 0) 2058 blk_mq_put_dispatch_budget(q, budget_token); 2059 } 2060 } 2061 2062 /* 2063 * blk_mq_commit_rqs will notify driver using bd->last that there is no 2064 * more requests. (See comment in struct blk_mq_ops for commit_rqs for 2065 * details) 2066 * Attention, we should explicitly call this in unusual cases: 2067 * 1) did not queue everything initially scheduled to queue 2068 * 2) the last attempt to queue a request failed 2069 */ 2070 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, 2071 bool from_schedule) 2072 { 2073 if (hctx->queue->mq_ops->commit_rqs && queued) { 2074 trace_block_unplug(hctx->queue, queued, !from_schedule); 2075 hctx->queue->mq_ops->commit_rqs(hctx); 2076 } 2077 } 2078 2079 /* 2080 * Returns true if we did some work AND can potentially do more. 2081 */ 2082 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, 2083 unsigned int nr_budgets) 2084 { 2085 enum prep_dispatch prep; 2086 struct request_queue *q = hctx->queue; 2087 struct request *rq; 2088 int queued; 2089 blk_status_t ret = BLK_STS_OK; 2090 bool needs_resource = false; 2091 2092 if (list_empty(list)) 2093 return false; 2094 2095 /* 2096 * Now process all the entries, sending them to the driver. 2097 */ 2098 queued = 0; 2099 do { 2100 struct blk_mq_queue_data bd; 2101 2102 rq = list_first_entry(list, struct request, queuelist); 2103 2104 WARN_ON_ONCE(hctx != rq->mq_hctx); 2105 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); 2106 if (prep != PREP_DISPATCH_OK) 2107 break; 2108 2109 list_del_init(&rq->queuelist); 2110 2111 bd.rq = rq; 2112 bd.last = list_empty(list); 2113 2114 /* 2115 * once the request is queued to lld, no need to cover the 2116 * budget any more 2117 */ 2118 if (nr_budgets) 2119 nr_budgets--; 2120 ret = q->mq_ops->queue_rq(hctx, &bd); 2121 switch (ret) { 2122 case BLK_STS_OK: 2123 queued++; 2124 break; 2125 case BLK_STS_RESOURCE: 2126 needs_resource = true; 2127 fallthrough; 2128 case BLK_STS_DEV_RESOURCE: 2129 blk_mq_handle_dev_resource(rq, list); 2130 goto out; 2131 default: 2132 blk_mq_end_request(rq, ret); 2133 } 2134 } while (!list_empty(list)); 2135 out: 2136 /* If we didn't flush the entire list, we could have told the driver 2137 * there was more coming, but that turned out to be a lie. 2138 */ 2139 if (!list_empty(list) || ret != BLK_STS_OK) 2140 blk_mq_commit_rqs(hctx, queued, false); 2141 2142 /* 2143 * Any items that need requeuing? Stuff them into hctx->dispatch, 2144 * that is where we will continue on next queue run. 
2145 */ 2146 if (!list_empty(list)) { 2147 bool needs_restart; 2148 /* For non-shared tags, the RESTART check will suffice */ 2149 bool no_tag = prep == PREP_DISPATCH_NO_TAG && 2150 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || 2151 blk_mq_is_shared_tags(hctx->flags)); 2152 2153 if (nr_budgets) 2154 blk_mq_release_budgets(q, list); 2155 2156 spin_lock(&hctx->lock); 2157 list_splice_tail_init(list, &hctx->dispatch); 2158 spin_unlock(&hctx->lock); 2159 2160 /* 2161 * Order adding requests to hctx->dispatch and checking 2162 * SCHED_RESTART flag. The pair of this smp_mb() is the one 2163 * in blk_mq_sched_restart(). Avoid restart code path to 2164 * miss the new added requests to hctx->dispatch, meantime 2165 * SCHED_RESTART is observed here. 2166 */ 2167 smp_mb(); 2168 2169 /* 2170 * If SCHED_RESTART was set by the caller of this function and 2171 * it is no longer set that means that it was cleared by another 2172 * thread and hence that a queue rerun is needed. 2173 * 2174 * If 'no_tag' is set, that means that we failed getting 2175 * a driver tag with an I/O scheduler attached. If our dispatch 2176 * waitqueue is no longer active, ensure that we run the queue 2177 * AFTER adding our entries back to the list. 2178 * 2179 * If no I/O scheduler has been configured it is possible that 2180 * the hardware queue got stopped and restarted before requests 2181 * were pushed back onto the dispatch list. Rerun the queue to 2182 * avoid starvation. Notes: 2183 * - blk_mq_run_hw_queue() checks whether or not a queue has 2184 * been stopped before rerunning a queue. 2185 * - Some but not all block drivers stop a queue before 2186 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 2187 * and dm-rq. 2188 * 2189 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 2190 * bit is set, run queue after a delay to avoid IO stalls 2191 * that could otherwise occur if the queue is idle. We'll do 2192 * similar if we couldn't get budget or couldn't lock a zone 2193 * and SCHED_RESTART is set. 2194 */ 2195 needs_restart = blk_mq_sched_needs_restart(hctx); 2196 if (prep == PREP_DISPATCH_NO_BUDGET) 2197 needs_resource = true; 2198 if (!needs_restart || 2199 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 2200 blk_mq_run_hw_queue(hctx, true); 2201 else if (needs_resource) 2202 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 2203 2204 blk_mq_update_dispatch_busy(hctx, true); 2205 return false; 2206 } 2207 2208 blk_mq_update_dispatch_busy(hctx, false); 2209 return true; 2210 } 2211 2212 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) 2213 { 2214 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); 2215 2216 if (cpu >= nr_cpu_ids) 2217 cpu = cpumask_first(hctx->cpumask); 2218 return cpu; 2219 } 2220 2221 /* 2222 * ->next_cpu is always calculated from hctx->cpumask, so simply use 2223 * it for speeding up the check 2224 */ 2225 static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx) 2226 { 2227 return hctx->next_cpu >= nr_cpu_ids; 2228 } 2229 2230 /* 2231 * It'd be great if the workqueue API had a way to pass 2232 * in a mask and had some smarts for more clever placement. 2233 * For now we just round-robin here, switching for every 2234 * BLK_MQ_CPU_WORK_BATCH queued items. 
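 * (BLK_MQ_CPU_WORK_BATCH is currently 8, so work stays on the selected CPU
 * for eight queue runs before blk_mq_hctx_next_cpu() advances to the next
 * online CPU in hctx->cpumask)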
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	/* Switch to unbound if no allowable CPUs in this hctx */
	if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx))
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Schedule unbound work if we can't find an online CPU for this
	 * hctx; this should only happen while handling the CPU DEAD
	 * hotplug event.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure a CPU is re-selected on the next run, once the
		 * CPUs in hctx->cpumask come back online.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)
{
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching the io scheduler,
	 * updating nr_hw_queues, or similar; the queue must not be run, and
	 * even blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * The queue will be rerun by blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	__blk_mq_run_dispatch_ops(hctx->queue, false,
		need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx));
	return need_run;
}

/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	bool need_run;

	/*
	 * We can't run the queue inline with interrupts disabled.
	 */
	WARN_ON_ONCE(!async && in_interrupt());

	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);

	need_run = blk_mq_hw_queue_need_run(hctx);
	if (!need_run) {
		unsigned long flags;

		/*
		 * Synchronize with blk_mq_unquiesce_queue(): the quiesced
		 * check above is lockless, so re-check under ->queue_lock to
		 * make sure we see the up-to-date state and do not miss
		 * rerunning the hw queue.
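		 * (blk_mq_unquiesce_queue() clears the quiesced flag under
		 * ->queue_lock before rerunning the queues, so a re-check
		 * under the lock either sees the flag cleared or is
		 * guaranteed a later rerun)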
2340 */ 2341 spin_lock_irqsave(&hctx->queue->queue_lock, flags); 2342 need_run = blk_mq_hw_queue_need_run(hctx); 2343 spin_unlock_irqrestore(&hctx->queue->queue_lock, flags); 2344 2345 if (!need_run) 2346 return; 2347 } 2348 2349 if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { 2350 blk_mq_delay_run_hw_queue(hctx, 0); 2351 return; 2352 } 2353 2354 blk_mq_run_dispatch_ops(hctx->queue, 2355 blk_mq_sched_dispatch_requests(hctx)); 2356 } 2357 EXPORT_SYMBOL(blk_mq_run_hw_queue); 2358 2359 /* 2360 * Return prefered queue to dispatch from (if any) for non-mq aware IO 2361 * scheduler. 2362 */ 2363 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2364 { 2365 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 2366 /* 2367 * If the IO scheduler does not respect hardware queues when 2368 * dispatching, we just don't bother with multiple HW queues and 2369 * dispatch from hctx for the current CPU since running multiple queues 2370 * just causes lock contention inside the scheduler and pointless cache 2371 * bouncing. 2372 */ 2373 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; 2374 2375 if (!blk_mq_hctx_stopped(hctx)) 2376 return hctx; 2377 return NULL; 2378 } 2379 2380 /** 2381 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2382 * @q: Pointer to the request queue to run. 2383 * @async: If we want to run the queue asynchronously. 2384 */ 2385 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2386 { 2387 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2388 unsigned long i; 2389 2390 sq_hctx = NULL; 2391 if (blk_queue_sq_sched(q)) 2392 sq_hctx = blk_mq_get_sq_hctx(q); 2393 queue_for_each_hw_ctx(q, hctx, i) { 2394 if (blk_mq_hctx_stopped(hctx)) 2395 continue; 2396 /* 2397 * Dispatch from this hctx either if there's no hctx preferred 2398 * by IO scheduler or if it has requests that bypass the 2399 * scheduler. 2400 */ 2401 if (!sq_hctx || sq_hctx == hctx || 2402 !list_empty_careful(&hctx->dispatch)) 2403 blk_mq_run_hw_queue(hctx, async); 2404 } 2405 } 2406 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2407 2408 /** 2409 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2410 * @q: Pointer to the request queue to run. 2411 * @msecs: Milliseconds of delay to wait before running the queues. 2412 */ 2413 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2414 { 2415 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2416 unsigned long i; 2417 2418 sq_hctx = NULL; 2419 if (blk_queue_sq_sched(q)) 2420 sq_hctx = blk_mq_get_sq_hctx(q); 2421 queue_for_each_hw_ctx(q, hctx, i) { 2422 if (blk_mq_hctx_stopped(hctx)) 2423 continue; 2424 /* 2425 * If there is already a run_work pending, leave the 2426 * pending delay untouched. Otherwise, a hctx can stall 2427 * if another hctx is re-delaying the other's work 2428 * before the work executes. 2429 */ 2430 if (delayed_work_pending(&hctx->run_work)) 2431 continue; 2432 /* 2433 * Dispatch from this hctx either if there's no hctx preferred 2434 * by IO scheduler or if it has requests that bypass the 2435 * scheduler. 2436 */ 2437 if (!sq_hctx || sq_hctx == hctx || 2438 !list_empty_careful(&hctx->dispatch)) 2439 blk_mq_delay_run_hw_queue(hctx, msecs); 2440 } 2441 } 2442 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2443 2444 /* 2445 * This function is often used for pausing .queue_rq() by driver when 2446 * there isn't enough resource or some conditions aren't satisfied, and 2447 * BLK_STS_RESOURCE is usually returned. 
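 * A stopped hctx is skipped by the queue-run paths until
 * blk_mq_start_hw_queue() or blk_mq_start_stopped_hw_queues() clears
 * BLK_MQ_S_STOPPED again.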
2448 * 2449 * We do not guarantee that dispatch can be drained or blocked 2450 * after blk_mq_stop_hw_queue() returns. Please use 2451 * blk_mq_quiesce_queue() for that requirement. 2452 */ 2453 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2454 { 2455 cancel_delayed_work(&hctx->run_work); 2456 2457 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2458 } 2459 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2460 2461 /* 2462 * This function is often used for pausing .queue_rq() by driver when 2463 * there isn't enough resource or some conditions aren't satisfied, and 2464 * BLK_STS_RESOURCE is usually returned. 2465 * 2466 * We do not guarantee that dispatch can be drained or blocked 2467 * after blk_mq_stop_hw_queues() returns. Please use 2468 * blk_mq_quiesce_queue() for that requirement. 2469 */ 2470 void blk_mq_stop_hw_queues(struct request_queue *q) 2471 { 2472 struct blk_mq_hw_ctx *hctx; 2473 unsigned long i; 2474 2475 queue_for_each_hw_ctx(q, hctx, i) 2476 blk_mq_stop_hw_queue(hctx); 2477 } 2478 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2479 2480 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2481 { 2482 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2483 2484 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); 2485 } 2486 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2487 2488 void blk_mq_start_hw_queues(struct request_queue *q) 2489 { 2490 struct blk_mq_hw_ctx *hctx; 2491 unsigned long i; 2492 2493 queue_for_each_hw_ctx(q, hctx, i) 2494 blk_mq_start_hw_queue(hctx); 2495 } 2496 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2497 2498 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2499 { 2500 if (!blk_mq_hctx_stopped(hctx)) 2501 return; 2502 2503 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2504 /* 2505 * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the 2506 * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch 2507 * list in the subsequent routine. 2508 */ 2509 smp_mb__after_atomic(); 2510 blk_mq_run_hw_queue(hctx, async); 2511 } 2512 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2513 2514 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2515 { 2516 struct blk_mq_hw_ctx *hctx; 2517 unsigned long i; 2518 2519 queue_for_each_hw_ctx(q, hctx, i) 2520 blk_mq_start_stopped_hw_queue(hctx, async || 2521 (hctx->flags & BLK_MQ_F_BLOCKING)); 2522 } 2523 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2524 2525 static void blk_mq_run_work_fn(struct work_struct *work) 2526 { 2527 struct blk_mq_hw_ctx *hctx = 2528 container_of(work, struct blk_mq_hw_ctx, run_work.work); 2529 2530 blk_mq_run_dispatch_ops(hctx->queue, 2531 blk_mq_sched_dispatch_requests(hctx)); 2532 } 2533 2534 /** 2535 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2536 * @rq: Pointer to request to be inserted. 2537 * @flags: BLK_MQ_INSERT_* 2538 * 2539 * Should only be used carefully, when the caller knows we want to 2540 * bypass a potential IO scheduler on the target device. 
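 *
 * For example, passthrough and flush requests, and requests the driver could
 * not queue, are (re)inserted through this path; see blk_mq_insert_request()
 * and blk_mq_try_issue_directly() below.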
 */
static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&hctx->lock);
	if (flags & BLK_MQ_INSERT_AT_HEAD)
		list_add(&rq->queuelist, &hctx->dispatch);
	else
		list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
}

static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_ctx *ctx, struct list_head *list,
		bool run_queue_async)
{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/*
	 * Try to issue requests directly if the hw queue isn't busy to save an
	 * extra enqueue & dequeue to the sw queue.
	 */
	if (!hctx->dispatch_busy && !run_queue_async) {
		blk_mq_run_dispatch_ops(hctx->queue,
				blk_mq_try_issue_list_directly(hctx, list));
		if (list_empty(list))
			goto out;
	}

	/*
	 * Preemption doesn't flush the plug list, so it's possible that
	 * ctx->cpu is offline now.
	 */
	list_for_each_entry(rq, list, queuelist) {
		BUG_ON(rq->mq_ctx != ctx);
		trace_block_rq_insert(rq);
		if (rq->cmd_flags & REQ_NOWAIT)
			run_queue_async = true;
	}

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
out:
	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (blk_rq_is_passthrough(rq)) {
		/*
		 * Passthrough requests have to be added to hctx->dispatch
		 * directly. The device may be in a state where it can't
		 * handle FS requests and always returns BLK_STS_RESOURCE for
		 * them, which gets them added to hctx->dispatch.
		 *
		 * If a passthrough request is required to unblock the queues
		 * and it is added to the scheduler queue instead, it would
		 * never be dispatched, since requests in hctx->dispatch are
		 * prioritized.
		 */
		blk_mq_request_bypass_insert(rq, flags);
	} else if (req_op(rq) == REQ_OP_FLUSH) {
		/*
		 * Normal IO requests are inserted into the scheduler or sw
		 * queues, while flush requests are added directly to the
		 * dispatch list (hctx->dispatch). There is at most one
		 * in-flight flush request per hw queue, so for correctness
		 * it doesn't matter whether the flush goes to the head or
		 * the tail of the dispatch list.
		 *
		 * However, with NCQ a flush is a non-NCQ command and queueing
		 * it fails while any normal IO request (NCQ command) is in
		 * flight. Adding the flush rq to the front of hctx->dispatch
		 * tends to add a little latency to it (because of
		 * SCHED_RESTART) compared with adding it to the tail, which
		 * increases the chance of flush merging, so fewer flush
		 * requests are issued to the controller. Roughly ~10% of the
		 * runtime of blktests block/004 was saved on a disk attached
		 * to an AHCI/NCQ controller by inserting the flush rq at the
		 * front of hctx->dispatch.
		 *
		 * So simply queue the flush rq at the front of hctx->dispatch,
		 * so that flush-intensive workloads benefit on NCQ HW.
2630 */ 2631 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD); 2632 } else if (q->elevator) { 2633 LIST_HEAD(list); 2634 2635 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG); 2636 2637 list_add(&rq->queuelist, &list); 2638 q->elevator->type->ops.insert_requests(hctx, &list, flags); 2639 } else { 2640 trace_block_rq_insert(rq); 2641 2642 spin_lock(&ctx->lock); 2643 if (flags & BLK_MQ_INSERT_AT_HEAD) 2644 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); 2645 else 2646 list_add_tail(&rq->queuelist, 2647 &ctx->rq_lists[hctx->type]); 2648 blk_mq_hctx_mark_pending(hctx, ctx); 2649 spin_unlock(&ctx->lock); 2650 } 2651 } 2652 2653 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2654 unsigned int nr_segs) 2655 { 2656 int err; 2657 2658 if (bio->bi_opf & REQ_RAHEAD) 2659 rq->cmd_flags |= REQ_FAILFAST_MASK; 2660 2661 rq->bio = rq->biotail = bio; 2662 rq->__sector = bio->bi_iter.bi_sector; 2663 rq->__data_len = bio->bi_iter.bi_size; 2664 rq->nr_phys_segments = nr_segs; 2665 if (bio_integrity(bio)) 2666 rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, 2667 bio); 2668 2669 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2670 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2671 WARN_ON_ONCE(err); 2672 2673 blk_account_io_start(rq); 2674 } 2675 2676 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2677 struct request *rq, bool last) 2678 { 2679 struct request_queue *q = rq->q; 2680 struct blk_mq_queue_data bd = { 2681 .rq = rq, 2682 .last = last, 2683 }; 2684 blk_status_t ret; 2685 2686 /* 2687 * For OK queue, we are done. For error, caller may kill it. 2688 * Any other error (busy), just add it to our list as we 2689 * previously would have done. 2690 */ 2691 ret = q->mq_ops->queue_rq(hctx, &bd); 2692 switch (ret) { 2693 case BLK_STS_OK: 2694 blk_mq_update_dispatch_busy(hctx, false); 2695 break; 2696 case BLK_STS_RESOURCE: 2697 case BLK_STS_DEV_RESOURCE: 2698 blk_mq_update_dispatch_busy(hctx, true); 2699 __blk_mq_requeue_request(rq); 2700 break; 2701 default: 2702 blk_mq_update_dispatch_busy(hctx, false); 2703 break; 2704 } 2705 2706 return ret; 2707 } 2708 2709 static bool blk_mq_get_budget_and_tag(struct request *rq) 2710 { 2711 int budget_token; 2712 2713 budget_token = blk_mq_get_dispatch_budget(rq->q); 2714 if (budget_token < 0) 2715 return false; 2716 blk_mq_set_rq_budget_token(rq, budget_token); 2717 if (!blk_mq_get_driver_tag(rq)) { 2718 blk_mq_put_dispatch_budget(rq->q, budget_token); 2719 return false; 2720 } 2721 return true; 2722 } 2723 2724 /** 2725 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2726 * @hctx: Pointer of the associated hardware queue. 2727 * @rq: Pointer to request to be sent. 2728 * 2729 * If the device has enough resources to accept a new request now, send the 2730 * request directly to device driver. Else, insert at hctx->dispatch queue, so 2731 * we can try send it another time in the future. Requests inserted at this 2732 * queue have higher priority. 
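 * (hctx->dispatch is serviced before the scheduler and software queues on
 * the next queue run)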
2733 */ 2734 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2735 struct request *rq) 2736 { 2737 blk_status_t ret; 2738 2739 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { 2740 blk_mq_insert_request(rq, 0); 2741 blk_mq_run_hw_queue(hctx, false); 2742 return; 2743 } 2744 2745 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) { 2746 blk_mq_insert_request(rq, 0); 2747 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT); 2748 return; 2749 } 2750 2751 ret = __blk_mq_issue_directly(hctx, rq, true); 2752 switch (ret) { 2753 case BLK_STS_OK: 2754 break; 2755 case BLK_STS_RESOURCE: 2756 case BLK_STS_DEV_RESOURCE: 2757 blk_mq_request_bypass_insert(rq, 0); 2758 blk_mq_run_hw_queue(hctx, false); 2759 break; 2760 default: 2761 blk_mq_end_request(rq, ret); 2762 break; 2763 } 2764 } 2765 2766 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2767 { 2768 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2769 2770 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { 2771 blk_mq_insert_request(rq, 0); 2772 blk_mq_run_hw_queue(hctx, false); 2773 return BLK_STS_OK; 2774 } 2775 2776 if (!blk_mq_get_budget_and_tag(rq)) 2777 return BLK_STS_RESOURCE; 2778 return __blk_mq_issue_directly(hctx, rq, last); 2779 } 2780 2781 static void blk_mq_plug_issue_direct(struct blk_plug *plug) 2782 { 2783 struct blk_mq_hw_ctx *hctx = NULL; 2784 struct request *rq; 2785 int queued = 0; 2786 blk_status_t ret = BLK_STS_OK; 2787 2788 while ((rq = rq_list_pop(&plug->mq_list))) { 2789 bool last = rq_list_empty(&plug->mq_list); 2790 2791 if (hctx != rq->mq_hctx) { 2792 if (hctx) { 2793 blk_mq_commit_rqs(hctx, queued, false); 2794 queued = 0; 2795 } 2796 hctx = rq->mq_hctx; 2797 } 2798 2799 ret = blk_mq_request_issue_directly(rq, last); 2800 switch (ret) { 2801 case BLK_STS_OK: 2802 queued++; 2803 break; 2804 case BLK_STS_RESOURCE: 2805 case BLK_STS_DEV_RESOURCE: 2806 blk_mq_request_bypass_insert(rq, 0); 2807 blk_mq_run_hw_queue(hctx, false); 2808 goto out; 2809 default: 2810 blk_mq_end_request(rq, ret); 2811 break; 2812 } 2813 } 2814 2815 out: 2816 if (ret != BLK_STS_OK) 2817 blk_mq_commit_rqs(hctx, queued, false); 2818 } 2819 2820 static void __blk_mq_flush_plug_list(struct request_queue *q, 2821 struct blk_plug *plug) 2822 { 2823 if (blk_queue_quiesced(q)) 2824 return; 2825 q->mq_ops->queue_rqs(&plug->mq_list); 2826 } 2827 2828 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2829 { 2830 struct blk_mq_hw_ctx *this_hctx = NULL; 2831 struct blk_mq_ctx *this_ctx = NULL; 2832 struct rq_list requeue_list = {}; 2833 unsigned int depth = 0; 2834 bool is_passthrough = false; 2835 LIST_HEAD(list); 2836 2837 do { 2838 struct request *rq = rq_list_pop(&plug->mq_list); 2839 2840 if (!this_hctx) { 2841 this_hctx = rq->mq_hctx; 2842 this_ctx = rq->mq_ctx; 2843 is_passthrough = blk_rq_is_passthrough(rq); 2844 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx || 2845 is_passthrough != blk_rq_is_passthrough(rq)) { 2846 rq_list_add_tail(&requeue_list, rq); 2847 continue; 2848 } 2849 list_add_tail(&rq->queuelist, &list); 2850 depth++; 2851 } while (!rq_list_empty(&plug->mq_list)); 2852 2853 plug->mq_list = requeue_list; 2854 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2855 2856 percpu_ref_get(&this_hctx->queue->q_usage_counter); 2857 /* passthrough requests should never be issued to the I/O scheduler */ 2858 if (is_passthrough) { 2859 spin_lock(&this_hctx->lock); 2860 list_splice_tail_init(&list, 
&this_hctx->dispatch); 2861 spin_unlock(&this_hctx->lock); 2862 blk_mq_run_hw_queue(this_hctx, from_sched); 2863 } else if (this_hctx->queue->elevator) { 2864 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx, 2865 &list, 0); 2866 blk_mq_run_hw_queue(this_hctx, from_sched); 2867 } else { 2868 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched); 2869 } 2870 percpu_ref_put(&this_hctx->queue->q_usage_counter); 2871 } 2872 2873 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2874 { 2875 struct request *rq; 2876 unsigned int depth; 2877 2878 /* 2879 * We may have been called recursively midway through handling 2880 * plug->mq_list via a schedule() in the driver's queue_rq() callback. 2881 * To avoid mq_list changing under our feet, clear rq_count early and 2882 * bail out specifically if rq_count is 0 rather than checking 2883 * whether the mq_list is empty. 2884 */ 2885 if (plug->rq_count == 0) 2886 return; 2887 depth = plug->rq_count; 2888 plug->rq_count = 0; 2889 2890 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2891 struct request_queue *q; 2892 2893 rq = rq_list_peek(&plug->mq_list); 2894 q = rq->q; 2895 trace_block_unplug(q, depth, true); 2896 2897 /* 2898 * Peek first request and see if we have a ->queue_rqs() hook. 2899 * If we do, we can dispatch the whole plug list in one go. We 2900 * already know at this point that all requests belong to the 2901 * same queue, caller must ensure that's the case. 2902 */ 2903 if (q->mq_ops->queue_rqs) { 2904 blk_mq_run_dispatch_ops(q, 2905 __blk_mq_flush_plug_list(q, plug)); 2906 if (rq_list_empty(&plug->mq_list)) 2907 return; 2908 } 2909 2910 blk_mq_run_dispatch_ops(q, 2911 blk_mq_plug_issue_direct(plug)); 2912 if (rq_list_empty(&plug->mq_list)) 2913 return; 2914 } 2915 2916 do { 2917 blk_mq_dispatch_plug_list(plug, from_schedule); 2918 } while (!rq_list_empty(&plug->mq_list)); 2919 } 2920 2921 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2922 struct list_head *list) 2923 { 2924 int queued = 0; 2925 blk_status_t ret = BLK_STS_OK; 2926 2927 while (!list_empty(list)) { 2928 struct request *rq = list_first_entry(list, struct request, 2929 queuelist); 2930 2931 list_del_init(&rq->queuelist); 2932 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2933 switch (ret) { 2934 case BLK_STS_OK: 2935 queued++; 2936 break; 2937 case BLK_STS_RESOURCE: 2938 case BLK_STS_DEV_RESOURCE: 2939 blk_mq_request_bypass_insert(rq, 0); 2940 if (list_empty(list)) 2941 blk_mq_run_hw_queue(hctx, false); 2942 goto out; 2943 default: 2944 blk_mq_end_request(rq, ret); 2945 break; 2946 } 2947 } 2948 2949 out: 2950 if (ret != BLK_STS_OK) 2951 blk_mq_commit_rqs(hctx, queued, false); 2952 } 2953 2954 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2955 struct bio *bio, unsigned int nr_segs) 2956 { 2957 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2958 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2959 return true; 2960 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2961 return true; 2962 } 2963 return false; 2964 } 2965 2966 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2967 struct blk_plug *plug, 2968 struct bio *bio, 2969 unsigned int nsegs) 2970 { 2971 struct blk_mq_alloc_data data = { 2972 .q = q, 2973 .nr_tags = 1, 2974 .cmd_flags = bio->bi_opf, 2975 }; 2976 struct request *rq; 2977 2978 rq_qos_throttle(q, bio); 2979 2980 if (plug) { 2981 data.nr_tags = plug->nr_ios; 2982 plug->nr_ios = 1; 2983 data.cached_rqs = &plug->cached_rqs; 
2984 } 2985 2986 rq = __blk_mq_alloc_requests(&data); 2987 if (unlikely(!rq)) 2988 rq_qos_cleanup(q, bio); 2989 return rq; 2990 } 2991 2992 /* 2993 * Check if there is a suitable cached request and return it. 2994 */ 2995 static struct request *blk_mq_peek_cached_request(struct blk_plug *plug, 2996 struct request_queue *q, blk_opf_t opf) 2997 { 2998 enum hctx_type type = blk_mq_get_hctx_type(opf); 2999 struct request *rq; 3000 3001 if (!plug) 3002 return NULL; 3003 rq = rq_list_peek(&plug->cached_rqs); 3004 if (!rq || rq->q != q) 3005 return NULL; 3006 if (type != rq->mq_hctx->type && 3007 (type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT)) 3008 return NULL; 3009 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf)) 3010 return NULL; 3011 return rq; 3012 } 3013 3014 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, 3015 struct bio *bio) 3016 { 3017 if (rq_list_pop(&plug->cached_rqs) != rq) 3018 WARN_ON_ONCE(1); 3019 3020 /* 3021 * If any qos ->throttle() end up blocking, we will have flushed the 3022 * plug and hence killed the cached_rq list as well. Pop this entry 3023 * before we throttle. 3024 */ 3025 rq_qos_throttle(rq->q, bio); 3026 3027 blk_mq_rq_time_init(rq, blk_time_get_ns()); 3028 rq->cmd_flags = bio->bi_opf; 3029 INIT_LIST_HEAD(&rq->queuelist); 3030 } 3031 3032 static bool bio_unaligned(const struct bio *bio, struct request_queue *q) 3033 { 3034 unsigned int bs_mask = queue_logical_block_size(q) - 1; 3035 3036 /* .bi_sector of any zero sized bio need to be initialized */ 3037 if ((bio->bi_iter.bi_size & bs_mask) || 3038 ((bio->bi_iter.bi_sector << SECTOR_SHIFT) & bs_mask)) 3039 return true; 3040 return false; 3041 } 3042 3043 /** 3044 * blk_mq_submit_bio - Create and send a request to block device. 3045 * @bio: Bio pointer. 3046 * 3047 * Builds up a request structure from @q and @bio and send to the device. The 3048 * request may not be queued directly to hardware if: 3049 * * This request can be merged with another one 3050 * * We want to place request at plug queue for possible future merging 3051 * * There is an IO scheduler active at this queue 3052 * 3053 * It will not queue the request if there is an error with the bio, or at the 3054 * request creation. 3055 */ 3056 void blk_mq_submit_bio(struct bio *bio) 3057 { 3058 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 3059 struct blk_plug *plug = current->plug; 3060 const int is_sync = op_is_sync(bio->bi_opf); 3061 struct blk_mq_hw_ctx *hctx; 3062 unsigned int nr_segs; 3063 struct request *rq; 3064 blk_status_t ret; 3065 3066 /* 3067 * If the plug has a cached request for this queue, try to use it. 3068 */ 3069 rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf); 3070 3071 /* 3072 * A BIO that was released from a zone write plug has already been 3073 * through the preparation in this function, already holds a reference 3074 * on the queue usage counter, and is the only write BIO in-flight for 3075 * the target zone. Go straight to preparing a request for it. 3076 */ 3077 if (bio_zone_write_plugging(bio)) { 3078 nr_segs = bio->__bi_nr_segments; 3079 if (rq) 3080 blk_queue_exit(q); 3081 goto new_request; 3082 } 3083 3084 bio = blk_queue_bounce(bio, q); 3085 3086 /* 3087 * The cached request already holds a q_usage_counter reference and we 3088 * don't have to acquire a new one if we use it. 
3089 */ 3090 if (!rq) { 3091 if (unlikely(bio_queue_enter(bio))) 3092 return; 3093 } 3094 3095 /* 3096 * Device reconfiguration may change logical block size or reduce the 3097 * number of poll queues, so the checks for alignment and poll support 3098 * have to be done with queue usage counter held. 3099 */ 3100 if (unlikely(bio_unaligned(bio, q))) { 3101 bio_io_error(bio); 3102 goto queue_exit; 3103 } 3104 3105 if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) { 3106 bio->bi_status = BLK_STS_NOTSUPP; 3107 bio_endio(bio); 3108 goto queue_exit; 3109 } 3110 3111 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); 3112 if (!bio) 3113 goto queue_exit; 3114 3115 if (!bio_integrity_prep(bio)) 3116 goto queue_exit; 3117 3118 if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) 3119 goto queue_exit; 3120 3121 if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs)) 3122 goto queue_exit; 3123 3124 new_request: 3125 if (rq) { 3126 blk_mq_use_cached_rq(rq, plug, bio); 3127 } else { 3128 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 3129 if (unlikely(!rq)) { 3130 if (bio->bi_opf & REQ_NOWAIT) 3131 bio_wouldblock_error(bio); 3132 goto queue_exit; 3133 } 3134 } 3135 3136 trace_block_getrq(bio); 3137 3138 rq_qos_track(q, rq, bio); 3139 3140 blk_mq_bio_to_request(rq, bio, nr_segs); 3141 3142 ret = blk_crypto_rq_get_keyslot(rq); 3143 if (ret != BLK_STS_OK) { 3144 bio->bi_status = ret; 3145 bio_endio(bio); 3146 blk_mq_free_request(rq); 3147 return; 3148 } 3149 3150 if (bio_zone_write_plugging(bio)) 3151 blk_zone_write_plug_init_request(rq); 3152 3153 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) 3154 return; 3155 3156 if (plug) { 3157 blk_add_rq_to_plug(plug, rq); 3158 return; 3159 } 3160 3161 hctx = rq->mq_hctx; 3162 if ((rq->rq_flags & RQF_USE_SCHED) || 3163 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { 3164 blk_mq_insert_request(rq, 0); 3165 blk_mq_run_hw_queue(hctx, true); 3166 } else { 3167 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); 3168 } 3169 return; 3170 3171 queue_exit: 3172 /* 3173 * Don't drop the queue reference if we were trying to use a cached 3174 * request and thus didn't acquire one. 3175 */ 3176 if (!rq) 3177 blk_queue_exit(q); 3178 } 3179 3180 #ifdef CONFIG_BLK_MQ_STACKING 3181 /** 3182 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 3183 * @rq: the request being queued 3184 */ 3185 blk_status_t blk_insert_cloned_request(struct request *rq) 3186 { 3187 struct request_queue *q = rq->q; 3188 unsigned int max_sectors = blk_queue_get_max_sectors(rq); 3189 unsigned int max_segments = blk_rq_get_max_segments(rq); 3190 blk_status_t ret; 3191 3192 if (blk_rq_sectors(rq) > max_sectors) { 3193 /* 3194 * SCSI device does not have a good way to return if 3195 * Write Same/Zero is actually supported. If a device rejects 3196 * a non-read/write command (discard, write same,etc.) the 3197 * low-level device driver will set the relevant queue limit to 3198 * 0 to prevent blk-lib from issuing more of the offending 3199 * operations. Commands queued prior to the queue limit being 3200 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 3201 * errors being propagated to upper layers. 3202 */ 3203 if (max_sectors == 0) 3204 return BLK_STS_NOTSUPP; 3205 3206 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", 3207 __func__, blk_rq_sectors(rq), max_sectors); 3208 return BLK_STS_IOERR; 3209 } 3210 3211 /* 3212 * The queue settings related to segment counting may differ from the 3213 * original queue. 
3214 */ 3215 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 3216 if (rq->nr_phys_segments > max_segments) { 3217 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n", 3218 __func__, rq->nr_phys_segments, max_segments); 3219 return BLK_STS_IOERR; 3220 } 3221 3222 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 3223 return BLK_STS_IOERR; 3224 3225 ret = blk_crypto_rq_get_keyslot(rq); 3226 if (ret != BLK_STS_OK) 3227 return ret; 3228 3229 blk_account_io_start(rq); 3230 3231 /* 3232 * Since we have a scheduler attached on the top device, 3233 * bypass a potential scheduler on the bottom device for 3234 * insert. 3235 */ 3236 blk_mq_run_dispatch_ops(q, 3237 ret = blk_mq_request_issue_directly(rq, true)); 3238 if (ret) 3239 blk_account_io_done(rq, blk_time_get_ns()); 3240 return ret; 3241 } 3242 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 3243 3244 /** 3245 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3246 * @rq: the clone request to be cleaned up 3247 * 3248 * Description: 3249 * Free all bios in @rq for a cloned request. 3250 */ 3251 void blk_rq_unprep_clone(struct request *rq) 3252 { 3253 struct bio *bio; 3254 3255 while ((bio = rq->bio) != NULL) { 3256 rq->bio = bio->bi_next; 3257 3258 bio_put(bio); 3259 } 3260 } 3261 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3262 3263 /** 3264 * blk_rq_prep_clone - Helper function to setup clone request 3265 * @rq: the request to be setup 3266 * @rq_src: original request to be cloned 3267 * @bs: bio_set that bios for clone are allocated from 3268 * @gfp_mask: memory allocation mask for bio 3269 * @bio_ctr: setup function to be called for each clone bio. 3270 * Returns %0 for success, non %0 for failure. 3271 * @data: private data to be passed to @bio_ctr 3272 * 3273 * Description: 3274 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3275 * Also, pages which the original bios are pointing to are not copied 3276 * and the cloned bios just point same pages. 3277 * So cloned bios must be completed before original bios, which means 3278 * the caller must complete @rq before @rq_src. 3279 */ 3280 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3281 struct bio_set *bs, gfp_t gfp_mask, 3282 int (*bio_ctr)(struct bio *, struct bio *, void *), 3283 void *data) 3284 { 3285 struct bio *bio_src; 3286 3287 if (!bs) 3288 bs = &fs_bio_set; 3289 3290 __rq_for_each_bio(bio_src, rq_src) { 3291 struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src, 3292 gfp_mask, bs); 3293 if (!bio) 3294 goto free_and_out; 3295 3296 if (bio_ctr && bio_ctr(bio, bio_src, data)) { 3297 bio_put(bio); 3298 goto free_and_out; 3299 } 3300 3301 if (rq->bio) { 3302 rq->biotail->bi_next = bio; 3303 rq->biotail = bio; 3304 } else { 3305 rq->bio = rq->biotail = bio; 3306 } 3307 } 3308 3309 /* Copy attributes of the original request to the clone request. */ 3310 rq->__sector = blk_rq_pos(rq_src); 3311 rq->__data_len = blk_rq_bytes(rq_src); 3312 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 3313 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 3314 rq->special_vec = rq_src->special_vec; 3315 } 3316 rq->nr_phys_segments = rq_src->nr_phys_segments; 3317 3318 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 3319 goto free_and_out; 3320 3321 return 0; 3322 3323 free_and_out: 3324 blk_rq_unprep_clone(rq); 3325 3326 return -ENOMEM; 3327 } 3328 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3329 #endif /* CONFIG_BLK_MQ_STACKING */ 3330 3331 /* 3332 * Steal bios from a request and add them to a bio list. 
3333 * The request must not have been partially completed before. 3334 */ 3335 void blk_steal_bios(struct bio_list *list, struct request *rq) 3336 { 3337 if (rq->bio) { 3338 if (list->tail) 3339 list->tail->bi_next = rq->bio; 3340 else 3341 list->head = rq->bio; 3342 list->tail = rq->biotail; 3343 3344 rq->bio = NULL; 3345 rq->biotail = NULL; 3346 } 3347 3348 rq->__data_len = 0; 3349 } 3350 EXPORT_SYMBOL_GPL(blk_steal_bios); 3351 3352 static size_t order_to_size(unsigned int order) 3353 { 3354 return (size_t)PAGE_SIZE << order; 3355 } 3356 3357 /* called before freeing request pool in @tags */ 3358 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3359 struct blk_mq_tags *tags) 3360 { 3361 struct page *page; 3362 unsigned long flags; 3363 3364 /* 3365 * There is no need to clear mapping if driver tags is not initialized 3366 * or the mapping belongs to the driver tags. 3367 */ 3368 if (!drv_tags || drv_tags == tags) 3369 return; 3370 3371 list_for_each_entry(page, &tags->page_list, lru) { 3372 unsigned long start = (unsigned long)page_address(page); 3373 unsigned long end = start + order_to_size(page->private); 3374 int i; 3375 3376 for (i = 0; i < drv_tags->nr_tags; i++) { 3377 struct request *rq = drv_tags->rqs[i]; 3378 unsigned long rq_addr = (unsigned long)rq; 3379 3380 if (rq_addr >= start && rq_addr < end) { 3381 WARN_ON_ONCE(req_ref_read(rq) != 0); 3382 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3383 } 3384 } 3385 } 3386 3387 /* 3388 * Wait until all pending iteration is done. 3389 * 3390 * Request reference is cleared and it is guaranteed to be observed 3391 * after the ->lock is released. 3392 */ 3393 spin_lock_irqsave(&drv_tags->lock, flags); 3394 spin_unlock_irqrestore(&drv_tags->lock, flags); 3395 } 3396 3397 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3398 unsigned int hctx_idx) 3399 { 3400 struct blk_mq_tags *drv_tags; 3401 struct page *page; 3402 3403 if (list_empty(&tags->page_list)) 3404 return; 3405 3406 if (blk_mq_is_shared_tags(set->flags)) 3407 drv_tags = set->shared_tags; 3408 else 3409 drv_tags = set->tags[hctx_idx]; 3410 3411 if (tags->static_rqs && set->ops->exit_request) { 3412 int i; 3413 3414 for (i = 0; i < tags->nr_tags; i++) { 3415 struct request *rq = tags->static_rqs[i]; 3416 3417 if (!rq) 3418 continue; 3419 set->ops->exit_request(set, rq, hctx_idx); 3420 tags->static_rqs[i] = NULL; 3421 } 3422 } 3423 3424 blk_mq_clear_rq_mapping(drv_tags, tags); 3425 3426 while (!list_empty(&tags->page_list)) { 3427 page = list_first_entry(&tags->page_list, struct page, lru); 3428 list_del_init(&page->lru); 3429 /* 3430 * Remove kmemleak object previously allocated in 3431 * blk_mq_alloc_rqs(). 
3432 */ 3433 kmemleak_free(page_address(page)); 3434 __free_pages(page, page->private); 3435 } 3436 } 3437 3438 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3439 { 3440 kfree(tags->rqs); 3441 tags->rqs = NULL; 3442 kfree(tags->static_rqs); 3443 tags->static_rqs = NULL; 3444 3445 blk_mq_free_tags(tags); 3446 } 3447 3448 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3449 unsigned int hctx_idx) 3450 { 3451 int i; 3452 3453 for (i = 0; i < set->nr_maps; i++) { 3454 unsigned int start = set->map[i].queue_offset; 3455 unsigned int end = start + set->map[i].nr_queues; 3456 3457 if (hctx_idx >= start && hctx_idx < end) 3458 break; 3459 } 3460 3461 if (i >= set->nr_maps) 3462 i = HCTX_TYPE_DEFAULT; 3463 3464 return i; 3465 } 3466 3467 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3468 unsigned int hctx_idx) 3469 { 3470 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3471 3472 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3473 } 3474 3475 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3476 unsigned int hctx_idx, 3477 unsigned int nr_tags, 3478 unsigned int reserved_tags) 3479 { 3480 int node = blk_mq_get_hctx_node(set, hctx_idx); 3481 struct blk_mq_tags *tags; 3482 3483 if (node == NUMA_NO_NODE) 3484 node = set->numa_node; 3485 3486 tags = blk_mq_init_tags(nr_tags, reserved_tags, set->flags, node); 3487 if (!tags) 3488 return NULL; 3489 3490 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3491 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3492 node); 3493 if (!tags->rqs) 3494 goto err_free_tags; 3495 3496 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3497 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3498 node); 3499 if (!tags->static_rqs) 3500 goto err_free_rqs; 3501 3502 return tags; 3503 3504 err_free_rqs: 3505 kfree(tags->rqs); 3506 err_free_tags: 3507 blk_mq_free_tags(tags); 3508 return NULL; 3509 } 3510 3511 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3512 unsigned int hctx_idx, int node) 3513 { 3514 int ret; 3515 3516 if (set->ops->init_request) { 3517 ret = set->ops->init_request(set, rq, hctx_idx, node); 3518 if (ret) 3519 return ret; 3520 } 3521 3522 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3523 return 0; 3524 } 3525 3526 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3527 struct blk_mq_tags *tags, 3528 unsigned int hctx_idx, unsigned int depth) 3529 { 3530 unsigned int i, j, entries_per_page, max_order = 4; 3531 int node = blk_mq_get_hctx_node(set, hctx_idx); 3532 size_t rq_size, left; 3533 3534 if (node == NUMA_NO_NODE) 3535 node = set->numa_node; 3536 3537 INIT_LIST_HEAD(&tags->page_list); 3538 3539 /* 3540 * rq_size is the size of the request plus driver payload, rounded 3541 * to the cacheline size 3542 */ 3543 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3544 cache_line_size()); 3545 left = rq_size * depth; 3546 3547 for (i = 0; i < depth; ) { 3548 int this_order = max_order; 3549 struct page *page; 3550 int to_do; 3551 void *p; 3552 3553 while (this_order && left < order_to_size(this_order - 1)) 3554 this_order--; 3555 3556 do { 3557 page = alloc_pages_node(node, 3558 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3559 this_order); 3560 if (page) 3561 break; 3562 if (!this_order--) 3563 break; 3564 if (order_to_size(this_order) < rq_size) 3565 break; 3566 } while (1); 3567 3568 if (!page) 3569 goto fail; 3570 3571 page->private = this_order; 3572 list_add_tail(&page->lru, &tags->page_list); 3573 3574 p = 
page_address(page); 3575 /* 3576 * Allow kmemleak to scan these pages as they contain pointers 3577 * to additional allocations like via ops->init_request(). 3578 */ 3579 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3580 entries_per_page = order_to_size(this_order) / rq_size; 3581 to_do = min(entries_per_page, depth - i); 3582 left -= to_do * rq_size; 3583 for (j = 0; j < to_do; j++) { 3584 struct request *rq = p; 3585 3586 tags->static_rqs[i] = rq; 3587 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3588 tags->static_rqs[i] = NULL; 3589 goto fail; 3590 } 3591 3592 p += rq_size; 3593 i++; 3594 } 3595 } 3596 return 0; 3597 3598 fail: 3599 blk_mq_free_rqs(set, tags, hctx_idx); 3600 return -ENOMEM; 3601 } 3602 3603 struct rq_iter_data { 3604 struct blk_mq_hw_ctx *hctx; 3605 bool has_rq; 3606 }; 3607 3608 static bool blk_mq_has_request(struct request *rq, void *data) 3609 { 3610 struct rq_iter_data *iter_data = data; 3611 3612 if (rq->mq_hctx != iter_data->hctx) 3613 return true; 3614 iter_data->has_rq = true; 3615 return false; 3616 } 3617 3618 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3619 { 3620 struct blk_mq_tags *tags = hctx->sched_tags ? 3621 hctx->sched_tags : hctx->tags; 3622 struct rq_iter_data data = { 3623 .hctx = hctx, 3624 }; 3625 3626 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3627 return data.has_rq; 3628 } 3629 3630 static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx, 3631 unsigned int this_cpu) 3632 { 3633 enum hctx_type type = hctx->type; 3634 int cpu; 3635 3636 /* 3637 * hctx->cpumask has to rule out isolated CPUs, but userspace still 3638 * might submit IOs on these isolated CPUs, so use the queue map to 3639 * check if all CPUs mapped to this hctx are offline 3640 */ 3641 for_each_online_cpu(cpu) { 3642 struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue, 3643 type, cpu); 3644 3645 if (h != hctx) 3646 continue; 3647 3648 /* this hctx has at least one online CPU */ 3649 if (this_cpu != cpu) 3650 return true; 3651 } 3652 3653 return false; 3654 } 3655 3656 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3657 { 3658 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3659 struct blk_mq_hw_ctx, cpuhp_online); 3660 3661 if (blk_mq_hctx_has_online_cpu(hctx, cpu)) 3662 return 0; 3663 3664 /* 3665 * Prevent new request from being allocated on the current hctx. 3666 * 3667 * The smp_mb__after_atomic() Pairs with the implied barrier in 3668 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 3669 * seen once we return from the tag allocator. 3670 */ 3671 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3672 smp_mb__after_atomic(); 3673 3674 /* 3675 * Try to grab a reference to the queue and wait for any outstanding 3676 * requests. If we could not grab a reference the queue has been 3677 * frozen and there are no requests. 3678 */ 3679 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3680 while (blk_mq_hctx_has_requests(hctx)) 3681 msleep(5); 3682 percpu_ref_put(&hctx->queue->q_usage_counter); 3683 } 3684 3685 return 0; 3686 } 3687 3688 /* 3689 * Check if one CPU is mapped to the specified hctx 3690 * 3691 * Isolated CPUs have been ruled out from hctx->cpumask, which is supposed 3692 * to be used for scheduling kworker only. 
For other usage, please call this 3693 * helper for checking if one CPU belongs to the specified hctx 3694 */ 3695 static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu, 3696 const struct blk_mq_hw_ctx *hctx) 3697 { 3698 struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue, 3699 hctx->type, cpu); 3700 3701 return mapped_hctx == hctx; 3702 } 3703 3704 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3705 { 3706 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3707 struct blk_mq_hw_ctx, cpuhp_online); 3708 3709 if (blk_mq_cpu_mapped_to_hctx(cpu, hctx)) 3710 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3711 return 0; 3712 } 3713 3714 /* 3715 * 'cpu' is going away. splice any existing rq_list entries from this 3716 * software queue to the hw queue dispatch list, and ensure that it 3717 * gets run. 3718 */ 3719 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3720 { 3721 struct blk_mq_hw_ctx *hctx; 3722 struct blk_mq_ctx *ctx; 3723 LIST_HEAD(tmp); 3724 enum hctx_type type; 3725 3726 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3727 if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx)) 3728 return 0; 3729 3730 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3731 type = hctx->type; 3732 3733 spin_lock(&ctx->lock); 3734 if (!list_empty(&ctx->rq_lists[type])) { 3735 list_splice_init(&ctx->rq_lists[type], &tmp); 3736 blk_mq_hctx_clear_pending(hctx, ctx); 3737 } 3738 spin_unlock(&ctx->lock); 3739 3740 if (list_empty(&tmp)) 3741 return 0; 3742 3743 spin_lock(&hctx->lock); 3744 list_splice_tail_init(&tmp, &hctx->dispatch); 3745 spin_unlock(&hctx->lock); 3746 3747 blk_mq_run_hw_queue(hctx, true); 3748 return 0; 3749 } 3750 3751 static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3752 { 3753 lockdep_assert_held(&blk_mq_cpuhp_lock); 3754 3755 if (!(hctx->flags & BLK_MQ_F_STACKING) && 3756 !hlist_unhashed(&hctx->cpuhp_online)) { 3757 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3758 &hctx->cpuhp_online); 3759 INIT_HLIST_NODE(&hctx->cpuhp_online); 3760 } 3761 3762 if (!hlist_unhashed(&hctx->cpuhp_dead)) { 3763 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3764 &hctx->cpuhp_dead); 3765 INIT_HLIST_NODE(&hctx->cpuhp_dead); 3766 } 3767 } 3768 3769 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3770 { 3771 mutex_lock(&blk_mq_cpuhp_lock); 3772 __blk_mq_remove_cpuhp(hctx); 3773 mutex_unlock(&blk_mq_cpuhp_lock); 3774 } 3775 3776 static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx) 3777 { 3778 lockdep_assert_held(&blk_mq_cpuhp_lock); 3779 3780 if (!(hctx->flags & BLK_MQ_F_STACKING) && 3781 hlist_unhashed(&hctx->cpuhp_online)) 3782 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3783 &hctx->cpuhp_online); 3784 3785 if (hlist_unhashed(&hctx->cpuhp_dead)) 3786 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3787 &hctx->cpuhp_dead); 3788 } 3789 3790 static void __blk_mq_remove_cpuhp_list(struct list_head *head) 3791 { 3792 struct blk_mq_hw_ctx *hctx; 3793 3794 lockdep_assert_held(&blk_mq_cpuhp_lock); 3795 3796 list_for_each_entry(hctx, head, hctx_list) 3797 __blk_mq_remove_cpuhp(hctx); 3798 } 3799 3800 /* 3801 * Unregister cpuhp callbacks from exited hw queues 3802 * 3803 * Safe to call if this `request_queue` is live 3804 */ 3805 static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q) 3806 { 3807 LIST_HEAD(hctx_list); 3808 3809 spin_lock(&q->unused_hctx_lock); 3810 list_splice_init(&q->unused_hctx_list, &hctx_list); 3811 spin_unlock(&q->unused_hctx_lock); 3812 
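	/*
	 * The unused hctxs are spliced onto a private list first because
	 * removing the cpuhp instances may sleep and so can't be done while
	 * holding the unused_hctx_lock spinlock.
	 */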
3813 mutex_lock(&blk_mq_cpuhp_lock); 3814 __blk_mq_remove_cpuhp_list(&hctx_list); 3815 mutex_unlock(&blk_mq_cpuhp_lock); 3816 3817 spin_lock(&q->unused_hctx_lock); 3818 list_splice(&hctx_list, &q->unused_hctx_list); 3819 spin_unlock(&q->unused_hctx_lock); 3820 } 3821 3822 /* 3823 * Register cpuhp callbacks from all hw queues 3824 * 3825 * Safe to call if this `request_queue` is live 3826 */ 3827 static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q) 3828 { 3829 struct blk_mq_hw_ctx *hctx; 3830 unsigned long i; 3831 3832 mutex_lock(&blk_mq_cpuhp_lock); 3833 queue_for_each_hw_ctx(q, hctx, i) 3834 __blk_mq_add_cpuhp(hctx); 3835 mutex_unlock(&blk_mq_cpuhp_lock); 3836 } 3837 3838 /* 3839 * Before freeing hw queue, clearing the flush request reference in 3840 * tags->rqs[] for avoiding potential UAF. 3841 */ 3842 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3843 unsigned int queue_depth, struct request *flush_rq) 3844 { 3845 int i; 3846 unsigned long flags; 3847 3848 /* The hw queue may not be mapped yet */ 3849 if (!tags) 3850 return; 3851 3852 WARN_ON_ONCE(req_ref_read(flush_rq) != 0); 3853 3854 for (i = 0; i < queue_depth; i++) 3855 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3856 3857 /* 3858 * Wait until all pending iteration is done. 3859 * 3860 * Request reference is cleared and it is guaranteed to be observed 3861 * after the ->lock is released. 3862 */ 3863 spin_lock_irqsave(&tags->lock, flags); 3864 spin_unlock_irqrestore(&tags->lock, flags); 3865 } 3866 3867 /* hctx->ctxs will be freed in queue's release handler */ 3868 static void blk_mq_exit_hctx(struct request_queue *q, 3869 struct blk_mq_tag_set *set, 3870 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3871 { 3872 struct request *flush_rq = hctx->fq->flush_rq; 3873 3874 if (blk_mq_hw_queue_mapped(hctx)) 3875 blk_mq_tag_idle(hctx); 3876 3877 if (blk_queue_init_done(q)) 3878 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3879 set->queue_depth, flush_rq); 3880 if (set->ops->exit_request) 3881 set->ops->exit_request(set, flush_rq, hctx_idx); 3882 3883 if (set->ops->exit_hctx) 3884 set->ops->exit_hctx(hctx, hctx_idx); 3885 3886 xa_erase(&q->hctx_table, hctx_idx); 3887 3888 spin_lock(&q->unused_hctx_lock); 3889 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3890 spin_unlock(&q->unused_hctx_lock); 3891 } 3892 3893 static void blk_mq_exit_hw_queues(struct request_queue *q, 3894 struct blk_mq_tag_set *set, int nr_queue) 3895 { 3896 struct blk_mq_hw_ctx *hctx; 3897 unsigned long i; 3898 3899 queue_for_each_hw_ctx(q, hctx, i) { 3900 if (i == nr_queue) 3901 break; 3902 blk_mq_remove_cpuhp(hctx); 3903 blk_mq_exit_hctx(q, set, hctx, i); 3904 } 3905 } 3906 3907 static int blk_mq_init_hctx(struct request_queue *q, 3908 struct blk_mq_tag_set *set, 3909 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3910 { 3911 hctx->queue_num = hctx_idx; 3912 3913 hctx->tags = set->tags[hctx_idx]; 3914 3915 if (set->ops->init_hctx && 3916 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3917 goto fail; 3918 3919 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3920 hctx->numa_node)) 3921 goto exit_hctx; 3922 3923 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3924 goto exit_flush_rq; 3925 3926 return 0; 3927 3928 exit_flush_rq: 3929 if (set->ops->exit_request) 3930 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3931 exit_hctx: 3932 if (set->ops->exit_hctx) 3933 set->ops->exit_hctx(hctx, hctx_idx); 3934 fail: 3935 return -1; 3936 } 3937 3938 static struct blk_mq_hw_ctx * 3939 
blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3940 int node) 3941 { 3942 struct blk_mq_hw_ctx *hctx; 3943 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3944 3945 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3946 if (!hctx) 3947 goto fail_alloc_hctx; 3948 3949 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3950 goto free_hctx; 3951 3952 atomic_set(&hctx->nr_active, 0); 3953 if (node == NUMA_NO_NODE) 3954 node = set->numa_node; 3955 hctx->numa_node = node; 3956 3957 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3958 spin_lock_init(&hctx->lock); 3959 INIT_LIST_HEAD(&hctx->dispatch); 3960 INIT_HLIST_NODE(&hctx->cpuhp_dead); 3961 INIT_HLIST_NODE(&hctx->cpuhp_online); 3962 hctx->queue = q; 3963 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3964 3965 INIT_LIST_HEAD(&hctx->hctx_list); 3966 3967 /* 3968 * Allocate space for all possible cpus to avoid allocation at 3969 * runtime 3970 */ 3971 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3972 gfp, node); 3973 if (!hctx->ctxs) 3974 goto free_cpumask; 3975 3976 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3977 gfp, node, false, false)) 3978 goto free_ctxs; 3979 hctx->nr_ctx = 0; 3980 3981 spin_lock_init(&hctx->dispatch_wait_lock); 3982 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3983 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3984 3985 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3986 if (!hctx->fq) 3987 goto free_bitmap; 3988 3989 blk_mq_hctx_kobj_init(hctx); 3990 3991 return hctx; 3992 3993 free_bitmap: 3994 sbitmap_free(&hctx->ctx_map); 3995 free_ctxs: 3996 kfree(hctx->ctxs); 3997 free_cpumask: 3998 free_cpumask_var(hctx->cpumask); 3999 free_hctx: 4000 kfree(hctx); 4001 fail_alloc_hctx: 4002 return NULL; 4003 } 4004 4005 static void blk_mq_init_cpu_queues(struct request_queue *q, 4006 unsigned int nr_hw_queues) 4007 { 4008 struct blk_mq_tag_set *set = q->tag_set; 4009 unsigned int i, j; 4010 4011 for_each_possible_cpu(i) { 4012 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 4013 struct blk_mq_hw_ctx *hctx; 4014 int k; 4015 4016 __ctx->cpu = i; 4017 spin_lock_init(&__ctx->lock); 4018 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 4019 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 4020 4021 __ctx->queue = q; 4022 4023 /* 4024 * Set local node, IFF we have more than one hw queue. 
If 4025 * not, we remain on the home node of the device 4026 */ 4027 for (j = 0; j < set->nr_maps; j++) { 4028 hctx = blk_mq_map_queue_type(q, j, i); 4029 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 4030 hctx->numa_node = cpu_to_node(i); 4031 } 4032 } 4033 } 4034 4035 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 4036 unsigned int hctx_idx, 4037 unsigned int depth) 4038 { 4039 struct blk_mq_tags *tags; 4040 int ret; 4041 4042 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 4043 if (!tags) 4044 return NULL; 4045 4046 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 4047 if (ret) { 4048 blk_mq_free_rq_map(tags); 4049 return NULL; 4050 } 4051 4052 return tags; 4053 } 4054 4055 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 4056 int hctx_idx) 4057 { 4058 if (blk_mq_is_shared_tags(set->flags)) { 4059 set->tags[hctx_idx] = set->shared_tags; 4060 4061 return true; 4062 } 4063 4064 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 4065 set->queue_depth); 4066 4067 return set->tags[hctx_idx]; 4068 } 4069 4070 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 4071 struct blk_mq_tags *tags, 4072 unsigned int hctx_idx) 4073 { 4074 if (tags) { 4075 blk_mq_free_rqs(set, tags, hctx_idx); 4076 blk_mq_free_rq_map(tags); 4077 } 4078 } 4079 4080 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 4081 unsigned int hctx_idx) 4082 { 4083 if (!blk_mq_is_shared_tags(set->flags)) 4084 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 4085 4086 set->tags[hctx_idx] = NULL; 4087 } 4088 4089 static void blk_mq_map_swqueue(struct request_queue *q) 4090 { 4091 unsigned int j, hctx_idx; 4092 unsigned long i; 4093 struct blk_mq_hw_ctx *hctx; 4094 struct blk_mq_ctx *ctx; 4095 struct blk_mq_tag_set *set = q->tag_set; 4096 4097 queue_for_each_hw_ctx(q, hctx, i) { 4098 cpumask_clear(hctx->cpumask); 4099 hctx->nr_ctx = 0; 4100 hctx->dispatch_from = NULL; 4101 } 4102 4103 /* 4104 * Map software to hardware queues. 4105 * 4106 * If the cpu isn't present, the cpu is mapped to first hctx. 4107 */ 4108 for_each_possible_cpu(i) { 4109 4110 ctx = per_cpu_ptr(q->queue_ctx, i); 4111 for (j = 0; j < set->nr_maps; j++) { 4112 if (!set->map[j].nr_queues) { 4113 ctx->hctxs[j] = blk_mq_map_queue_type(q, 4114 HCTX_TYPE_DEFAULT, i); 4115 continue; 4116 } 4117 hctx_idx = set->map[j].mq_map[i]; 4118 /* unmapped hw queue can be remapped after CPU topo changed */ 4119 if (!set->tags[hctx_idx] && 4120 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 4121 /* 4122 * If tags initialization fail for some hctx, 4123 * that hctx won't be brought online. In this 4124 * case, remap the current ctx to hctx[0] which 4125 * is guaranteed to always have tags allocated 4126 */ 4127 set->map[j].mq_map[i] = 0; 4128 } 4129 4130 hctx = blk_mq_map_queue_type(q, j, i); 4131 ctx->hctxs[j] = hctx; 4132 /* 4133 * If the CPU is already set in the mask, then we've 4134 * mapped this one already. This can happen if 4135 * devices share queues across queue maps. 4136 */ 4137 if (cpumask_test_cpu(i, hctx->cpumask)) 4138 continue; 4139 4140 cpumask_set_cpu(i, hctx->cpumask); 4141 hctx->type = j; 4142 ctx->index_hw[hctx->type] = hctx->nr_ctx; 4143 hctx->ctxs[hctx->nr_ctx++] = ctx; 4144 4145 /* 4146 * If the nr_ctx type overflows, we have exceeded the 4147 * amount of sw queues we can support. 
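			 * nr_ctx was just incremented above, so a value of
			 * zero here means the counter wrapped and can no
			 * longer represent the number of mapped sw queues.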
4148 */ 4149 BUG_ON(!hctx->nr_ctx); 4150 } 4151 4152 for (; j < HCTX_MAX_TYPES; j++) 4153 ctx->hctxs[j] = blk_mq_map_queue_type(q, 4154 HCTX_TYPE_DEFAULT, i); 4155 } 4156 4157 queue_for_each_hw_ctx(q, hctx, i) { 4158 int cpu; 4159 4160 /* 4161 * If no software queues are mapped to this hardware queue, 4162 * disable it and free the request entries. 4163 */ 4164 if (!hctx->nr_ctx) { 4165 /* Never unmap queue 0. We need it as a 4166 * fallback in case of a new remap fails 4167 * allocation 4168 */ 4169 if (i) 4170 __blk_mq_free_map_and_rqs(set, i); 4171 4172 hctx->tags = NULL; 4173 continue; 4174 } 4175 4176 hctx->tags = set->tags[i]; 4177 WARN_ON(!hctx->tags); 4178 4179 /* 4180 * Set the map size to the number of mapped software queues. 4181 * This is more accurate and more efficient than looping 4182 * over all possibly mapped software queues. 4183 */ 4184 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 4185 4186 /* 4187 * Rule out isolated CPUs from hctx->cpumask to avoid 4188 * running block kworker on isolated CPUs 4189 */ 4190 for_each_cpu(cpu, hctx->cpumask) { 4191 if (cpu_is_isolated(cpu)) 4192 cpumask_clear_cpu(cpu, hctx->cpumask); 4193 } 4194 4195 /* 4196 * Initialize batch roundrobin counts 4197 */ 4198 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 4199 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 4200 } 4201 } 4202 4203 /* 4204 * Caller needs to ensure that we're either frozen/quiesced, or that 4205 * the queue isn't live yet. 4206 */ 4207 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 4208 { 4209 struct blk_mq_hw_ctx *hctx; 4210 unsigned long i; 4211 4212 queue_for_each_hw_ctx(q, hctx, i) { 4213 if (shared) { 4214 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 4215 } else { 4216 blk_mq_tag_idle(hctx); 4217 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 4218 } 4219 } 4220 } 4221 4222 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 4223 bool shared) 4224 { 4225 struct request_queue *q; 4226 4227 lockdep_assert_held(&set->tag_list_lock); 4228 4229 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4230 blk_mq_freeze_queue(q); 4231 queue_set_hctx_shared(q, shared); 4232 blk_mq_unfreeze_queue(q); 4233 } 4234 } 4235 4236 static void blk_mq_del_queue_tag_set(struct request_queue *q) 4237 { 4238 struct blk_mq_tag_set *set = q->tag_set; 4239 4240 mutex_lock(&set->tag_list_lock); 4241 list_del(&q->tag_set_list); 4242 if (list_is_singular(&set->tag_list)) { 4243 /* just transitioned to unshared */ 4244 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 4245 /* update existing queue */ 4246 blk_mq_update_tag_set_shared(set, false); 4247 } 4248 mutex_unlock(&set->tag_list_lock); 4249 INIT_LIST_HEAD(&q->tag_set_list); 4250 } 4251 4252 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 4253 struct request_queue *q) 4254 { 4255 mutex_lock(&set->tag_list_lock); 4256 4257 /* 4258 * Check to see if we're transitioning to shared (from 1 to 2 queues). 
4259 */ 4260 if (!list_empty(&set->tag_list) && 4261 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 4262 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 4263 /* update existing queue */ 4264 blk_mq_update_tag_set_shared(set, true); 4265 } 4266 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 4267 queue_set_hctx_shared(q, true); 4268 list_add_tail(&q->tag_set_list, &set->tag_list); 4269 4270 mutex_unlock(&set->tag_list_lock); 4271 } 4272 4273 /* All allocations will be freed in release handler of q->mq_kobj */ 4274 static int blk_mq_alloc_ctxs(struct request_queue *q) 4275 { 4276 struct blk_mq_ctxs *ctxs; 4277 int cpu; 4278 4279 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 4280 if (!ctxs) 4281 return -ENOMEM; 4282 4283 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 4284 if (!ctxs->queue_ctx) 4285 goto fail; 4286 4287 for_each_possible_cpu(cpu) { 4288 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 4289 ctx->ctxs = ctxs; 4290 } 4291 4292 q->mq_kobj = &ctxs->kobj; 4293 q->queue_ctx = ctxs->queue_ctx; 4294 4295 return 0; 4296 fail: 4297 kfree(ctxs); 4298 return -ENOMEM; 4299 } 4300 4301 /* 4302 * It is the actual release handler for mq, but we do it from 4303 * request queue's release handler for avoiding use-after-free 4304 * and headache because q->mq_kobj shouldn't have been introduced, 4305 * but we can't group ctx/kctx kobj without it. 4306 */ 4307 void blk_mq_release(struct request_queue *q) 4308 { 4309 struct blk_mq_hw_ctx *hctx, *next; 4310 unsigned long i; 4311 4312 queue_for_each_hw_ctx(q, hctx, i) 4313 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 4314 4315 /* all hctx are in .unused_hctx_list now */ 4316 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 4317 list_del_init(&hctx->hctx_list); 4318 kobject_put(&hctx->kobj); 4319 } 4320 4321 xa_destroy(&q->hctx_table); 4322 4323 /* 4324 * release .mq_kobj and sw queue's kobject now because 4325 * both share lifetime with request queue. 4326 */ 4327 blk_mq_sysfs_deinit(q); 4328 } 4329 4330 struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, 4331 struct queue_limits *lim, void *queuedata) 4332 { 4333 struct queue_limits default_lim = { }; 4334 struct request_queue *q; 4335 int ret; 4336 4337 if (!lim) 4338 lim = &default_lim; 4339 lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; 4340 if (set->nr_maps > HCTX_TYPE_POLL) 4341 lim->features |= BLK_FEAT_POLL; 4342 4343 q = blk_alloc_queue(lim, set->numa_node); 4344 if (IS_ERR(q)) 4345 return q; 4346 q->queuedata = queuedata; 4347 ret = blk_mq_init_allocated_queue(set, q); 4348 if (ret) { 4349 blk_put_queue(q); 4350 return ERR_PTR(ret); 4351 } 4352 return q; 4353 } 4354 EXPORT_SYMBOL(blk_mq_alloc_queue); 4355 4356 /** 4357 * blk_mq_destroy_queue - shutdown a request queue 4358 * @q: request queue to shutdown 4359 * 4360 * This shuts down a request queue allocated by blk_mq_alloc_queue(). All future 4361 * requests will be failed with -ENODEV. The caller is responsible for dropping 4362 * the reference from blk_mq_alloc_queue() by calling blk_put_queue(). 
4363 * 4364 * Context: can sleep 4365 */ 4366 void blk_mq_destroy_queue(struct request_queue *q) 4367 { 4368 WARN_ON_ONCE(!queue_is_mq(q)); 4369 WARN_ON_ONCE(blk_queue_registered(q)); 4370 4371 might_sleep(); 4372 4373 blk_queue_flag_set(QUEUE_FLAG_DYING, q); 4374 blk_queue_start_drain(q); 4375 blk_mq_freeze_queue_wait(q); 4376 4377 blk_sync_queue(q); 4378 blk_mq_cancel_work_sync(q); 4379 blk_mq_exit_queue(q); 4380 } 4381 EXPORT_SYMBOL(blk_mq_destroy_queue); 4382 4383 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, 4384 struct queue_limits *lim, void *queuedata, 4385 struct lock_class_key *lkclass) 4386 { 4387 struct request_queue *q; 4388 struct gendisk *disk; 4389 4390 q = blk_mq_alloc_queue(set, lim, queuedata); 4391 if (IS_ERR(q)) 4392 return ERR_CAST(q); 4393 4394 disk = __alloc_disk_node(q, set->numa_node, lkclass); 4395 if (!disk) { 4396 blk_mq_destroy_queue(q); 4397 blk_put_queue(q); 4398 return ERR_PTR(-ENOMEM); 4399 } 4400 set_bit(GD_OWNS_QUEUE, &disk->state); 4401 return disk; 4402 } 4403 EXPORT_SYMBOL(__blk_mq_alloc_disk); 4404 4405 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, 4406 struct lock_class_key *lkclass) 4407 { 4408 struct gendisk *disk; 4409 4410 if (!blk_get_queue(q)) 4411 return NULL; 4412 disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass); 4413 if (!disk) 4414 blk_put_queue(q); 4415 return disk; 4416 } 4417 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); 4418 4419 /* 4420 * Only hctx removed from cpuhp list can be reused 4421 */ 4422 static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx) 4423 { 4424 return hlist_unhashed(&hctx->cpuhp_online) && 4425 hlist_unhashed(&hctx->cpuhp_dead); 4426 } 4427 4428 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 4429 struct blk_mq_tag_set *set, struct request_queue *q, 4430 int hctx_idx, int node) 4431 { 4432 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 4433 4434 /* reuse dead hctx first */ 4435 spin_lock(&q->unused_hctx_lock); 4436 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 4437 if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) { 4438 hctx = tmp; 4439 break; 4440 } 4441 } 4442 if (hctx) 4443 list_del_init(&hctx->hctx_list); 4444 spin_unlock(&q->unused_hctx_lock); 4445 4446 if (!hctx) 4447 hctx = blk_mq_alloc_hctx(q, set, node); 4448 if (!hctx) 4449 goto fail; 4450 4451 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 4452 goto free_hctx; 4453 4454 return hctx; 4455 4456 free_hctx: 4457 kobject_put(&hctx->kobj); 4458 fail: 4459 return NULL; 4460 } 4461 4462 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 4463 struct request_queue *q) 4464 { 4465 struct blk_mq_hw_ctx *hctx; 4466 unsigned long i, j; 4467 4468 /* protect against switching io scheduler */ 4469 mutex_lock(&q->sysfs_lock); 4470 for (i = 0; i < set->nr_hw_queues; i++) { 4471 int old_node; 4472 int node = blk_mq_get_hctx_node(set, i); 4473 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); 4474 4475 if (old_hctx) { 4476 old_node = old_hctx->numa_node; 4477 blk_mq_exit_hctx(q, set, old_hctx, i); 4478 } 4479 4480 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { 4481 if (!old_hctx) 4482 break; 4483 pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", 4484 node, old_node); 4485 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); 4486 WARN_ON_ONCE(!hctx); 4487 } 4488 } 4489 /* 4490 * Increasing nr_hw_queues fails. Free the newly allocated 4491 * hctxs and keep the previous q->nr_hw_queues. 
4492 */ 4493 if (i != set->nr_hw_queues) { 4494 j = q->nr_hw_queues; 4495 } else { 4496 j = i; 4497 q->nr_hw_queues = set->nr_hw_queues; 4498 } 4499 4500 xa_for_each_start(&q->hctx_table, j, hctx, j) 4501 blk_mq_exit_hctx(q, set, hctx, j); 4502 mutex_unlock(&q->sysfs_lock); 4503 4504 /* unregister cpuhp callbacks for exited hctxs */ 4505 blk_mq_remove_hw_queues_cpuhp(q); 4506 4507 /* register cpuhp for new initialized hctxs */ 4508 blk_mq_add_hw_queues_cpuhp(q); 4509 } 4510 4511 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 4512 struct request_queue *q) 4513 { 4514 /* mark the queue as mq asap */ 4515 q->mq_ops = set->ops; 4516 4517 /* 4518 * ->tag_set has to be setup before initialize hctx, which cpuphp 4519 * handler needs it for checking queue mapping 4520 */ 4521 q->tag_set = set; 4522 4523 if (blk_mq_alloc_ctxs(q)) 4524 goto err_exit; 4525 4526 /* init q->mq_kobj and sw queues' kobjects */ 4527 blk_mq_sysfs_init(q); 4528 4529 INIT_LIST_HEAD(&q->unused_hctx_list); 4530 spin_lock_init(&q->unused_hctx_lock); 4531 4532 xa_init(&q->hctx_table); 4533 4534 blk_mq_realloc_hw_ctxs(set, q); 4535 if (!q->nr_hw_queues) 4536 goto err_hctxs; 4537 4538 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); 4539 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); 4540 4541 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 4542 4543 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); 4544 INIT_LIST_HEAD(&q->flush_list); 4545 INIT_LIST_HEAD(&q->requeue_list); 4546 spin_lock_init(&q->requeue_lock); 4547 4548 q->nr_requests = set->queue_depth; 4549 4550 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 4551 blk_mq_add_queue_tag_set(set, q); 4552 blk_mq_map_swqueue(q); 4553 return 0; 4554 4555 err_hctxs: 4556 blk_mq_release(q); 4557 err_exit: 4558 q->mq_ops = NULL; 4559 return -ENOMEM; 4560 } 4561 EXPORT_SYMBOL(blk_mq_init_allocated_queue); 4562 4563 /* tags can _not_ be used after returning from blk_mq_exit_queue */ 4564 void blk_mq_exit_queue(struct request_queue *q) 4565 { 4566 struct blk_mq_tag_set *set = q->tag_set; 4567 4568 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ 4569 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 4570 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */ 4571 blk_mq_del_queue_tag_set(q); 4572 } 4573 4574 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) 4575 { 4576 int i; 4577 4578 if (blk_mq_is_shared_tags(set->flags)) { 4579 set->shared_tags = blk_mq_alloc_map_and_rqs(set, 4580 BLK_MQ_NO_HCTX_IDX, 4581 set->queue_depth); 4582 if (!set->shared_tags) 4583 return -ENOMEM; 4584 } 4585 4586 for (i = 0; i < set->nr_hw_queues; i++) { 4587 if (!__blk_mq_alloc_map_and_rqs(set, i)) 4588 goto out_unwind; 4589 cond_resched(); 4590 } 4591 4592 return 0; 4593 4594 out_unwind: 4595 while (--i >= 0) 4596 __blk_mq_free_map_and_rqs(set, i); 4597 4598 if (blk_mq_is_shared_tags(set->flags)) { 4599 blk_mq_free_map_and_rqs(set, set->shared_tags, 4600 BLK_MQ_NO_HCTX_IDX); 4601 } 4602 4603 return -ENOMEM; 4604 } 4605 4606 /* 4607 * Allocate the request maps associated with this tag_set. Note that this 4608 * may reduce the depth asked for, if memory is tight. set->queue_depth 4609 * will be updated to reflect the allocated depth. 
4610 */ 4611 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) 4612 { 4613 unsigned int depth; 4614 int err; 4615 4616 depth = set->queue_depth; 4617 do { 4618 err = __blk_mq_alloc_rq_maps(set); 4619 if (!err) 4620 break; 4621 4622 set->queue_depth >>= 1; 4623 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { 4624 err = -ENOMEM; 4625 break; 4626 } 4627 } while (set->queue_depth); 4628 4629 if (!set->queue_depth || err) { 4630 pr_err("blk-mq: failed to allocate request map\n"); 4631 return -ENOMEM; 4632 } 4633 4634 if (depth != set->queue_depth) 4635 pr_info("blk-mq: reduced tag depth (%u -> %u)\n", 4636 depth, set->queue_depth); 4637 4638 return 0; 4639 } 4640 4641 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set) 4642 { 4643 /* 4644 * blk_mq_map_queues() and multiple .map_queues() implementations 4645 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the 4646 * number of hardware queues. 4647 */ 4648 if (set->nr_maps == 1) 4649 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; 4650 4651 if (set->ops->map_queues) { 4652 int i; 4653 4654 /* 4655 * transport .map_queues is usually done in the following 4656 * way: 4657 * 4658 * for (queue = 0; queue < set->nr_hw_queues; queue++) { 4659 * mask = get_cpu_mask(queue) 4660 * for_each_cpu(cpu, mask) 4661 * set->map[x].mq_map[cpu] = queue; 4662 * } 4663 * 4664 * When we need to remap, the table has to be cleared for 4665 * killing stale mapping since one CPU may not be mapped 4666 * to any hw queue. 4667 */ 4668 for (i = 0; i < set->nr_maps; i++) 4669 blk_mq_clear_mq_map(&set->map[i]); 4670 4671 set->ops->map_queues(set); 4672 } else { 4673 BUG_ON(set->nr_maps > 1); 4674 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4675 } 4676 } 4677 4678 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 4679 int new_nr_hw_queues) 4680 { 4681 struct blk_mq_tags **new_tags; 4682 int i; 4683 4684 if (set->nr_hw_queues >= new_nr_hw_queues) 4685 goto done; 4686 4687 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 4688 GFP_KERNEL, set->numa_node); 4689 if (!new_tags) 4690 return -ENOMEM; 4691 4692 if (set->tags) 4693 memcpy(new_tags, set->tags, set->nr_hw_queues * 4694 sizeof(*set->tags)); 4695 kfree(set->tags); 4696 set->tags = new_tags; 4697 4698 for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) { 4699 if (!__blk_mq_alloc_map_and_rqs(set, i)) { 4700 while (--i >= set->nr_hw_queues) 4701 __blk_mq_free_map_and_rqs(set, i); 4702 return -ENOMEM; 4703 } 4704 cond_resched(); 4705 } 4706 4707 done: 4708 set->nr_hw_queues = new_nr_hw_queues; 4709 return 0; 4710 } 4711 4712 /* 4713 * Alloc a tag set to be associated with one or more request queues. 4714 * May fail with EINVAL for various error conditions. May adjust the 4715 * requested depth down, if it's too large. In that case, the set 4716 * value will be stored in set->queue_depth. 
4717 */ 4718 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 4719 { 4720 int i, ret; 4721 4722 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 4723 4724 if (!set->nr_hw_queues) 4725 return -EINVAL; 4726 if (!set->queue_depth) 4727 return -EINVAL; 4728 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 4729 return -EINVAL; 4730 4731 if (!set->ops->queue_rq) 4732 return -EINVAL; 4733 4734 if (!set->ops->get_budget ^ !set->ops->put_budget) 4735 return -EINVAL; 4736 4737 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 4738 pr_info("blk-mq: reduced tag depth to %u\n", 4739 BLK_MQ_MAX_DEPTH); 4740 set->queue_depth = BLK_MQ_MAX_DEPTH; 4741 } 4742 4743 if (!set->nr_maps) 4744 set->nr_maps = 1; 4745 else if (set->nr_maps > HCTX_MAX_TYPES) 4746 return -EINVAL; 4747 4748 /* 4749 * If a crashdump is active, then we are potentially in a very 4750 * memory constrained environment. Limit us to 64 tags to prevent 4751 * using too much memory. 4752 */ 4753 if (is_kdump_kernel()) 4754 set->queue_depth = min(64U, set->queue_depth); 4755 4756 /* 4757 * There is no use for more h/w queues than cpus if we just have 4758 * a single map 4759 */ 4760 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 4761 set->nr_hw_queues = nr_cpu_ids; 4762 4763 if (set->flags & BLK_MQ_F_BLOCKING) { 4764 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL); 4765 if (!set->srcu) 4766 return -ENOMEM; 4767 ret = init_srcu_struct(set->srcu); 4768 if (ret) 4769 goto out_free_srcu; 4770 } 4771 4772 ret = -ENOMEM; 4773 set->tags = kcalloc_node(set->nr_hw_queues, 4774 sizeof(struct blk_mq_tags *), GFP_KERNEL, 4775 set->numa_node); 4776 if (!set->tags) 4777 goto out_cleanup_srcu; 4778 4779 for (i = 0; i < set->nr_maps; i++) { 4780 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 4781 sizeof(set->map[i].mq_map[0]), 4782 GFP_KERNEL, set->numa_node); 4783 if (!set->map[i].mq_map) 4784 goto out_free_mq_map; 4785 set->map[i].nr_queues = set->nr_hw_queues; 4786 } 4787 4788 blk_mq_update_queue_map(set); 4789 4790 ret = blk_mq_alloc_set_map_and_rqs(set); 4791 if (ret) 4792 goto out_free_mq_map; 4793 4794 mutex_init(&set->tag_list_lock); 4795 INIT_LIST_HEAD(&set->tag_list); 4796 4797 return 0; 4798 4799 out_free_mq_map: 4800 for (i = 0; i < set->nr_maps; i++) { 4801 kfree(set->map[i].mq_map); 4802 set->map[i].mq_map = NULL; 4803 } 4804 kfree(set->tags); 4805 set->tags = NULL; 4806 out_cleanup_srcu: 4807 if (set->flags & BLK_MQ_F_BLOCKING) 4808 cleanup_srcu_struct(set->srcu); 4809 out_free_srcu: 4810 if (set->flags & BLK_MQ_F_BLOCKING) 4811 kfree(set->srcu); 4812 return ret; 4813 } 4814 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 4815 4816 /* allocate and initialize a tagset for a simple single-queue device */ 4817 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, 4818 const struct blk_mq_ops *ops, unsigned int queue_depth, 4819 unsigned int set_flags) 4820 { 4821 memset(set, 0, sizeof(*set)); 4822 set->ops = ops; 4823 set->nr_hw_queues = 1; 4824 set->nr_maps = 1; 4825 set->queue_depth = queue_depth; 4826 set->numa_node = NUMA_NO_NODE; 4827 set->flags = set_flags; 4828 return blk_mq_alloc_tag_set(set); 4829 } 4830 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); 4831 4832 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 4833 { 4834 int i, j; 4835 4836 for (i = 0; i < set->nr_hw_queues; i++) 4837 __blk_mq_free_map_and_rqs(set, i); 4838 4839 if (blk_mq_is_shared_tags(set->flags)) { 4840 blk_mq_free_map_and_rqs(set, set->shared_tags, 4841 BLK_MQ_NO_HCTX_IDX); 4842 } 4843 4844 for (j = 0; j < set->nr_maps; j++) { 4845 
kfree(set->map[j].mq_map); 4846 set->map[j].mq_map = NULL; 4847 } 4848 4849 kfree(set->tags); 4850 set->tags = NULL; 4851 if (set->flags & BLK_MQ_F_BLOCKING) { 4852 cleanup_srcu_struct(set->srcu); 4853 kfree(set->srcu); 4854 } 4855 } 4856 EXPORT_SYMBOL(blk_mq_free_tag_set); 4857 4858 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 4859 { 4860 struct blk_mq_tag_set *set = q->tag_set; 4861 struct blk_mq_hw_ctx *hctx; 4862 int ret; 4863 unsigned long i; 4864 4865 if (WARN_ON_ONCE(!q->mq_freeze_depth)) 4866 return -EINVAL; 4867 4868 if (!set) 4869 return -EINVAL; 4870 4871 if (q->nr_requests == nr) 4872 return 0; 4873 4874 blk_mq_quiesce_queue(q); 4875 4876 ret = 0; 4877 queue_for_each_hw_ctx(q, hctx, i) { 4878 if (!hctx->tags) 4879 continue; 4880 /* 4881 * If we're using an MQ scheduler, just update the scheduler 4882 * queue depth. This is similar to what the old code would do. 4883 */ 4884 if (hctx->sched_tags) { 4885 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 4886 nr, true); 4887 } else { 4888 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 4889 false); 4890 } 4891 if (ret) 4892 break; 4893 if (q->elevator && q->elevator->type->ops.depth_updated) 4894 q->elevator->type->ops.depth_updated(hctx); 4895 } 4896 if (!ret) { 4897 q->nr_requests = nr; 4898 if (blk_mq_is_shared_tags(set->flags)) { 4899 if (q->elevator) 4900 blk_mq_tag_update_sched_shared_tags(q); 4901 else 4902 blk_mq_tag_resize_shared_tags(set, nr); 4903 } 4904 } 4905 4906 blk_mq_unquiesce_queue(q); 4907 4908 return ret; 4909 } 4910 4911 /* 4912 * request_queue and elevator_type pair. 4913 * It is just used by __blk_mq_update_nr_hw_queues to cache 4914 * the elevator_type associated with a request_queue. 4915 */ 4916 struct blk_mq_qe_pair { 4917 struct list_head node; 4918 struct request_queue *q; 4919 struct elevator_type *type; 4920 }; 4921 4922 /* 4923 * Cache the elevator_type in qe pair list and switch the 4924 * io scheduler to 'none' 4925 */ 4926 static bool blk_mq_elv_switch_none(struct list_head *head, 4927 struct request_queue *q) 4928 { 4929 struct blk_mq_qe_pair *qe; 4930 4931 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); 4932 if (!qe) 4933 return false; 4934 4935 /* q->elevator needs protection from ->sysfs_lock */ 4936 mutex_lock(&q->sysfs_lock); 4937 4938 /* the check has to be done with holding sysfs_lock */ 4939 if (!q->elevator) { 4940 kfree(qe); 4941 goto unlock; 4942 } 4943 4944 INIT_LIST_HEAD(&qe->node); 4945 qe->q = q; 4946 qe->type = q->elevator->type; 4947 /* keep a reference to the elevator module as we'll switch back */ 4948 __elevator_get(qe->type); 4949 list_add(&qe->node, head); 4950 elevator_disable(q); 4951 unlock: 4952 mutex_unlock(&q->sysfs_lock); 4953 4954 return true; 4955 } 4956 4957 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head, 4958 struct request_queue *q) 4959 { 4960 struct blk_mq_qe_pair *qe; 4961 4962 list_for_each_entry(qe, head, node) 4963 if (qe->q == q) 4964 return qe; 4965 4966 return NULL; 4967 } 4968 4969 static void blk_mq_elv_switch_back(struct list_head *head, 4970 struct request_queue *q) 4971 { 4972 struct blk_mq_qe_pair *qe; 4973 struct elevator_type *t; 4974 4975 qe = blk_lookup_qe_pair(head, q); 4976 if (!qe) 4977 return; 4978 t = qe->type; 4979 list_del(&qe->node); 4980 kfree(qe); 4981 4982 mutex_lock(&q->sysfs_lock); 4983 elevator_switch(q, t); 4984 /* drop the reference acquired in blk_mq_elv_switch_none */ 4985 elevator_put(t); 4986 mutex_unlock(&q->sysfs_lock); 4987 } 4988 4989 
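/*
 * Change the number of hardware queues for every request queue that shares
 * this tag set: freeze all queues, switch their I/O schedulers to 'none',
 * drop the sysfs/debugfs hctx entries, reallocate tag maps and hw contexts,
 * rebuild the sw -> hw queue mapping, then re-register, switch the
 * schedulers back and unfreeze. If growing the set fails, the previous
 * nr_hw_queues is restored.
 */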
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 4990 int nr_hw_queues) 4991 { 4992 struct request_queue *q; 4993 LIST_HEAD(head); 4994 int prev_nr_hw_queues = set->nr_hw_queues; 4995 int i; 4996 4997 lockdep_assert_held(&set->tag_list_lock); 4998 4999 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) 5000 nr_hw_queues = nr_cpu_ids; 5001 if (nr_hw_queues < 1) 5002 return; 5003 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) 5004 return; 5005 5006 list_for_each_entry(q, &set->tag_list, tag_set_list) 5007 blk_mq_freeze_queue(q); 5008 /* 5009 * Switch IO scheduler to 'none', cleaning up the data associated 5010 * with the previous scheduler. We will switch back once we are done 5011 * updating the new sw to hw queue mappings. 5012 */ 5013 list_for_each_entry(q, &set->tag_list, tag_set_list) 5014 if (!blk_mq_elv_switch_none(&head, q)) 5015 goto switch_back; 5016 5017 list_for_each_entry(q, &set->tag_list, tag_set_list) { 5018 blk_mq_debugfs_unregister_hctxs(q); 5019 blk_mq_sysfs_unregister_hctxs(q); 5020 } 5021 5022 if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) 5023 goto reregister; 5024 5025 fallback: 5026 blk_mq_update_queue_map(set); 5027 list_for_each_entry(q, &set->tag_list, tag_set_list) { 5028 blk_mq_realloc_hw_ctxs(set, q); 5029 5030 if (q->nr_hw_queues != set->nr_hw_queues) { 5031 int i = prev_nr_hw_queues; 5032 5033 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 5034 nr_hw_queues, prev_nr_hw_queues); 5035 for (; i < set->nr_hw_queues; i++) 5036 __blk_mq_free_map_and_rqs(set, i); 5037 5038 set->nr_hw_queues = prev_nr_hw_queues; 5039 goto fallback; 5040 } 5041 blk_mq_map_swqueue(q); 5042 } 5043 5044 reregister: 5045 list_for_each_entry(q, &set->tag_list, tag_set_list) { 5046 blk_mq_sysfs_register_hctxs(q); 5047 blk_mq_debugfs_register_hctxs(q); 5048 } 5049 5050 switch_back: 5051 list_for_each_entry(q, &set->tag_list, tag_set_list) 5052 blk_mq_elv_switch_back(&head, q); 5053 5054 list_for_each_entry(q, &set->tag_list, tag_set_list) 5055 blk_mq_unfreeze_queue(q); 5056 5057 /* Free the excess tags when nr_hw_queues shrink. 
*/ 5058 for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++) 5059 __blk_mq_free_map_and_rqs(set, i); 5060 } 5061 5062 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 5063 { 5064 mutex_lock(&set->tag_list_lock); 5065 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 5066 mutex_unlock(&set->tag_list_lock); 5067 } 5068 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 5069 5070 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, 5071 struct io_comp_batch *iob, unsigned int flags) 5072 { 5073 long state = get_current_state(); 5074 int ret; 5075 5076 do { 5077 ret = q->mq_ops->poll(hctx, iob); 5078 if (ret > 0) { 5079 __set_current_state(TASK_RUNNING); 5080 return ret; 5081 } 5082 5083 if (signal_pending_state(state, current)) 5084 __set_current_state(TASK_RUNNING); 5085 if (task_is_running(current)) 5086 return 1; 5087 5088 if (ret < 0 || (flags & BLK_POLL_ONESHOT)) 5089 break; 5090 cpu_relax(); 5091 } while (!need_resched()); 5092 5093 __set_current_state(TASK_RUNNING); 5094 return 0; 5095 } 5096 5097 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, 5098 struct io_comp_batch *iob, unsigned int flags) 5099 { 5100 if (!blk_mq_can_poll(q)) 5101 return 0; 5102 return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags); 5103 } 5104 5105 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob, 5106 unsigned int poll_flags) 5107 { 5108 struct request_queue *q = rq->q; 5109 int ret; 5110 5111 if (!blk_rq_is_poll(rq)) 5112 return 0; 5113 if (!percpu_ref_tryget(&q->q_usage_counter)) 5114 return 0; 5115 5116 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags); 5117 blk_queue_exit(q); 5118 5119 return ret; 5120 } 5121 EXPORT_SYMBOL_GPL(blk_rq_poll); 5122 5123 unsigned int blk_mq_rq_cpu(struct request *rq) 5124 { 5125 return rq->mq_ctx->cpu; 5126 } 5127 EXPORT_SYMBOL(blk_mq_rq_cpu); 5128 5129 void blk_mq_cancel_work_sync(struct request_queue *q) 5130 { 5131 struct blk_mq_hw_ctx *hctx; 5132 unsigned long i; 5133 5134 cancel_delayed_work_sync(&q->requeue_work); 5135 5136 queue_for_each_hw_ctx(q, hctx, i) 5137 cancel_delayed_work_sync(&hctx->run_work); 5138 } 5139 5140 static int __init blk_mq_init(void) 5141 { 5142 int i; 5143 5144 for_each_possible_cpu(i) 5145 init_llist_head(&per_cpu(blk_cpu_done, i)); 5146 for_each_possible_cpu(i) 5147 INIT_CSD(&per_cpu(blk_cpu_csd, i), 5148 __blk_mq_complete_request_remote, NULL); 5149 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); 5150 5151 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, 5152 "block/softirq:dead", NULL, 5153 blk_softirq_cpu_dead); 5154 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 5155 blk_mq_hctx_notify_dead); 5156 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", 5157 blk_mq_hctx_notify_online, 5158 blk_mq_hctx_notify_offline); 5159 return 0; 5160 } 5161 subsys_initcall(blk_mq_init); 5162