// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
                blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
                struct io_comp_batch *iob, unsigned int flags);

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        return !list_empty_careful(&hctx->dispatch) ||
                sbitmap_any_bit_set(&hctx->ctx_map) ||
                        blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        const int bit = ctx->index_hw[hctx->type];

        if (!sbitmap_test_bit(&hctx->ctx_map, bit))
                sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        const int bit = ctx->index_hw[hctx->type];

        sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
        struct block_device *part;
        unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
        struct mq_inflight *mi = priv;

        if (rq->part && blk_do_io_stat(rq) &&
            (!mi->part->bd_partno || rq->part == mi->part) &&
            blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
                mi->inflight[rq_data_dir(rq)]++;

        return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part)
{
        struct mq_inflight mi = { .part = part };

        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

        return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2])
{
        struct mq_inflight mi = { .part = part };

        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
        inflight[0] = mi.inflight[0];
        inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
        mutex_lock(&q->mq_freeze_lock);
        if (++q->mq_freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
                mutex_unlock(&q->mq_freeze_lock);
                if (queue_is_mq(q))
                        blk_mq_run_hw_queues(q, false);
        } else {
                mutex_unlock(&q->mq_freeze_lock);
        }
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
        wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout)
{
        return wait_event_timeout(q->mq_freeze_wq,
                                        percpu_ref_is_zero(&q->q_usage_counter),
                                        timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
        /*
         * In the !blk_mq case we are only calling this to kill the
         * q_usage_counter, otherwise this increases the freeze depth
         * and waits for it to return to zero.  For this reason there is
         * no blk_unfreeze_queue(), and blk_freeze_queue() is not
         * exported to drivers as the only user for unfreeze is blk_mq.
         */
        blk_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
        /*
         * ...just an alias to keep freeze and unfreeze actions balanced
         * in the blk_mq_* namespace
         */
        blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
        mutex_lock(&q->mq_freeze_lock);
        if (force_atomic)
                q->q_usage_counter.data->force_atomic = true;
        q->mq_freeze_depth--;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        if (!q->mq_freeze_depth) {
                percpu_ref_resurrect(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
        mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
        __blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
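/*
 * Illustrative sketch (not part of this file): a driver typically brackets
 * live reconfiguration of a queue with a freeze/unfreeze pair, e.g.:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue limits or other data touched by the hot path ...
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_freeze_queue() only returns once q_usage_counter has dropped to
 * zero, i.e. once no request is in flight or being allocated.
 */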
/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->queue_lock, flags);
        if (!q->quiesce_depth++)
                blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
        spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @set: tag_set to wait on
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set.  This
 * function only waits for the quiesce on those request_queues that had
 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
        if (set->flags & BLK_MQ_F_BLOCKING)
                synchronize_srcu(set->srcu);
        else
                synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function has returned, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
        blk_mq_quiesce_queue_nowait(q);
        /* nothing to wait for non-mq queues */
        if (queue_is_mq(q))
                blk_mq_wait_quiesce_done(q->tag_set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue to the state it was in before quiescing,
 * which is done by blk_mq_quiesce_queue.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
        unsigned long flags;
        bool run_queue = false;

        spin_lock_irqsave(&q->queue_lock, flags);
        if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
                ;
        } else if (!--q->quiesce_depth) {
                blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
                run_queue = true;
        }
        spin_unlock_irqrestore(&q->queue_lock, flags);

        /* dispatch requests which are inserted during quiescing */
        if (run_queue)
                blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
        struct request_queue *q;

        mutex_lock(&set->tag_list_lock);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                if (!blk_queue_skip_tagset_quiesce(q))
                        blk_mq_quiesce_queue_nowait(q);
        }
        blk_mq_wait_quiesce_done(set);
        mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);

void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
        struct request_queue *q;

        mutex_lock(&set->tag_list_lock);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                if (!blk_queue_skip_tagset_quiesce(q))
                        blk_mq_unquiesce_queue(q);
        }
        mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
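/*
 * Illustrative sketch (not part of this file): unlike a freeze, a quiesce
 * only stops dispatch; requests can still be allocated and inserted.  A
 * driver that must pause dispatch around an internal state change would do:
 *
 *	blk_mq_quiesce_queue(q);
 *	... no ->queue_rq() invocation can be in progress past this point ...
 *	blk_mq_unquiesce_queue(q);
 *
 * The unquiesce side reruns the hardware queues to flush out anything that
 * was inserted while the queue was quiesced.
 */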
void blk_mq_wake_waiters(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
        memset(rq, 0, sizeof(*rq));

        INIT_LIST_HEAD(&rq->queuelist);
        rq->q = q;
        rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->tag = BLK_MQ_NO_TAG;
        rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
        blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
        struct blk_mq_ctx *ctx = data->ctx;
        struct blk_mq_hw_ctx *hctx = data->hctx;
        struct request_queue *q = data->q;
        struct request *rq = tags->static_rqs[tag];

        rq->q = q;
        rq->mq_ctx = ctx;
        rq->mq_hctx = hctx;
        rq->cmd_flags = data->cmd_flags;

        if (data->flags & BLK_MQ_REQ_PM)
                data->rq_flags |= RQF_PM;
        if (blk_queue_io_stat(q))
                data->rq_flags |= RQF_IO_STAT;
        rq->rq_flags = data->rq_flags;

        if (data->rq_flags & RQF_SCHED_TAGS) {
                rq->tag = BLK_MQ_NO_TAG;
                rq->internal_tag = tag;
        } else {
                rq->tag = tag;
                rq->internal_tag = BLK_MQ_NO_TAG;
        }
        rq->timeout = 0;

        if (blk_mq_need_time_stamp(rq))
                rq->start_time_ns = ktime_get_ns();
        else
                rq->start_time_ns = 0;
        rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
        rq->alloc_time_ns = alloc_time_ns;
#endif
        rq->io_start_time_ns = 0;
        rq->stats_sectors = 0;
        rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
#endif
        rq->end_io = NULL;
        rq->end_io_data = NULL;

        blk_crypto_rq_set_defaults(rq);
        INIT_LIST_HEAD(&rq->queuelist);
        /* tag was already set */
        WRITE_ONCE(rq->deadline, 0);
        req_ref_set(rq, 1);

        if (rq->rq_flags & RQF_USE_SCHED) {
                struct elevator_queue *e = data->q->elevator;

                INIT_HLIST_NODE(&rq->hash);
                RB_CLEAR_NODE(&rq->rb_node);

                if (e->type->ops.prepare_request)
                        e->type->ops.prepare_request(rq);
        }

        return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
                u64 alloc_time_ns)
{
        unsigned int tag, tag_offset;
        struct blk_mq_tags *tags;
        struct request *rq;
        unsigned long tag_mask;
        int i, nr = 0;

        tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
        if (unlikely(!tag_mask))
                return NULL;

        tags = blk_mq_tags_from_data(data);
        for (i = 0; tag_mask; i++) {
                if (!(tag_mask & (1UL << i)))
                        continue;
                tag = tag_offset + i;
                prefetch(tags->static_rqs[tag]);
                tag_mask &= ~(1UL << i);
                rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
                rq_list_add(data->cached_rq, rq);
                nr++;
        }
        /* caller already holds a reference, add for remainder */
        percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
        data->nr_tags -= nr;

        return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
        struct request_queue *q = data->q;
        u64 alloc_time_ns = 0;
        struct request *rq;
        unsigned int tag;

        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
                alloc_time_ns = ktime_get_ns();

        if (data->cmd_flags & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;

        if (q->elevator) {
                /*
                 * All requests use scheduler tags when an I/O scheduler is
                 * enabled for the queue.
                 */
                data->rq_flags |= RQF_SCHED_TAGS;

                /*
                 * Flush/passthrough requests are special and go directly to
                 * the dispatch list.
                 */
                if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
                    !blk_op_is_passthrough(data->cmd_flags)) {
                        struct elevator_mq_ops *ops = &q->elevator->type->ops;

                        WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);

                        data->rq_flags |= RQF_USE_SCHED;
                        if (ops->limit_depth)
                                ops->limit_depth(data->cmd_flags, data);
                }
        }

retry:
        data->ctx = blk_mq_get_ctx(q);
        data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
        if (!(data->rq_flags & RQF_SCHED_TAGS))
                blk_mq_tag_busy(data->hctx);

        if (data->flags & BLK_MQ_REQ_RESERVED)
                data->rq_flags |= RQF_RESV;

        /*
         * Try batched alloc if we want more than 1 tag.
         */
        if (data->nr_tags > 1) {
                rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
                if (rq)
                        return rq;
                data->nr_tags = 1;
        }

        /*
         * Waiting allocations only fail because of an inactive hctx.  In that
         * case just retry the hctx assignment and tag allocation as CPU hotplug
         * should have migrated us to an online CPU by now.
         */
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_NO_TAG) {
                if (data->flags & BLK_MQ_REQ_NOWAIT)
                        return NULL;
                /*
                 * Give up the CPU and sleep for a random short time to
                 * ensure that threads using a realtime scheduling class
                 * are migrated off the CPU, and thus off the hctx that
                 * is going away.
                 */
                msleep(3);
                goto retry;
        }

        return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
                                        alloc_time_ns);
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
                                            struct blk_plug *plug,
                                            blk_opf_t opf,
                                            blk_mq_req_flags_t flags)
{
        struct blk_mq_alloc_data data = {
                .q              = q,
                .flags          = flags,
                .cmd_flags      = opf,
                .nr_tags        = plug->nr_ios,
                .cached_rq      = &plug->cached_rq,
        };
        struct request *rq;

        if (blk_queue_enter(q, flags))
                return NULL;

        plug->nr_ios = 1;

        rq = __blk_mq_alloc_requests(&data);
        if (unlikely(!rq))
                blk_queue_exit(q);
        return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
                                                   blk_opf_t opf,
                                                   blk_mq_req_flags_t flags)
{
        struct blk_plug *plug = current->plug;
        struct request *rq;

        if (!plug)
                return NULL;

        if (rq_list_empty(plug->cached_rq)) {
                if (plug->nr_ios == 1)
                        return NULL;
                rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
                if (!rq)
                        return NULL;
        } else {
                rq = rq_list_peek(&plug->cached_rq);
                if (!rq || rq->q != q)
                        return NULL;

                if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
                        return NULL;
                if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
                        return NULL;

                plug->cached_rq = rq_list_next(rq);
        }

        rq->cmd_flags = opf;
        INIT_LIST_HEAD(&rq->queuelist);
        return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
                blk_mq_req_flags_t flags)
{
        struct request *rq;

        rq = blk_mq_alloc_cached_request(q, opf, flags);
        if (!rq) {
                struct blk_mq_alloc_data data = {
                        .q              = q,
                        .flags          = flags,
                        .cmd_flags      = opf,
                        .nr_tags        = 1,
                };
                int ret;

                ret = blk_queue_enter(q, flags);
                if (ret)
                        return ERR_PTR(ret);

                rq = __blk_mq_alloc_requests(&data);
                if (!rq)
                        goto out_queue_exit;
        }
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;
out_queue_exit:
        blk_queue_exit(q);
        return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
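/*
 * Illustrative sketch (not part of this file): a typical passthrough user
 * allocates a request, sets it up, executes it synchronously and frees it:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in the driver-specific payload for rq ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *
 * With BLK_MQ_REQ_NOWAIT the allocation returns an ERR_PTR instead of
 * sleeping for a free tag.
 */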
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
        struct blk_mq_alloc_data data = {
                .q              = q,
                .flags          = flags,
                .cmd_flags      = opf,
                .nr_tags        = 1,
        };
        u64 alloc_time_ns = 0;
        struct request *rq;
        unsigned int cpu;
        unsigned int tag;
        int ret;

        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
                alloc_time_ns = ktime_get_ns();

        /*
         * If the tag allocator sleeps we could get an allocation for a
         * different hardware context.  No need to complicate the low level
         * allocator for this for the rare use case of a command tied to
         * a specific queue.
         */
        if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
            WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
                return ERR_PTR(-EINVAL);

        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);

        ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);

        /*
         * Check if the hardware context is actually mapped to anything.
         * If not tell the caller that it should skip this queue.
         */
        ret = -EXDEV;
        data.hctx = xa_load(&q->hctx_table, hctx_idx);
        if (!blk_mq_hw_queue_mapped(data.hctx))
                goto out_queue_exit;
        cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                goto out_queue_exit;
        data.ctx = __blk_mq_get_ctx(q, cpu);

        if (q->elevator)
                data.rq_flags |= RQF_SCHED_TAGS;
        else
                blk_mq_tag_busy(data.hctx);

        if (flags & BLK_MQ_REQ_RESERVED)
                data.rq_flags |= RQF_RESV;

        ret = -EWOULDBLOCK;
        tag = blk_mq_get_tag(&data);
        if (tag == BLK_MQ_NO_TAG)
                goto out_queue_exit;
        rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
                                        alloc_time_ns);
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
        return rq;

out_queue_exit:
        blk_queue_exit(q);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
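/*
 * Illustrative sketch (not part of this file): callers that must target a
 * specific hardware queue (e.g. a per-queue setup command) pass both
 * mandatory flags plus the hctx index:
 *
 *	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
 *			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED, hctx_idx);
 *
 * Because the allocation never sleeps, -EWOULDBLOCK is returned when no
 * reserved tag is immediately available.
 */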
static void __blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        const int sched_tag = rq->internal_tag;

        blk_crypto_free_request(rq);
        blk_pm_mark_last_busy(rq);
        rq->mq_hctx = NULL;

        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                __blk_mq_dec_active_requests(hctx);

        if (rq->tag != BLK_MQ_NO_TAG)
                blk_mq_put_tag(hctx->tags, ctx, rq->tag);
        if (sched_tag != BLK_MQ_NO_TAG)
                blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if ((rq->rq_flags & RQF_USE_SCHED) &&
            q->elevator->type->ops.finish_request)
                q->elevator->type->ops.finish_request(rq);

        if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
                laptop_io_completion(q->disk->bdi);

        rq_qos_done(q, rq);

        WRITE_ONCE(rq->state, MQ_RQ_IDLE);
        if (req_ref_put_and_test(rq))
                __blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
        struct request *rq;

        while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
                blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
                rq->q->disk ? rq->q->disk->disk_name : "?",
                (__force unsigned long long) rq->cmd_flags);

        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
               (unsigned long long)blk_rq_pos(rq),
               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
        printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
               rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, blk_status_t error)
{
        if (unlikely(error)) {
                bio->bi_status = error;
        } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
                /*
                 * Partial zone append completions cannot be supported as the
                 * BIO fragments may end up not being written sequentially.
                 */
                if (bio->bi_iter.bi_size != nbytes)
                        bio->bi_status = BLK_STS_IOERR;
                else
                        bio->bi_iter.bi_sector = rq->__sector;
        }

        bio_advance(bio, nbytes);

        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);
        /* don't actually finish bio if it's part of flush sequence */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
        if (req->part && blk_do_io_stat(req)) {
                const int sgrp = op_stat_group(req_op(req));

                part_stat_lock();
                part_stat_add(req->part, sectors[sgrp], bytes >> 9);
                part_stat_unlock();
        }
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
        printk_ratelimited(KERN_ERR
                "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
                "phys_seg %u prio class %u\n",
                blk_status_to_str(status),
                req->q->disk ? req->q->disk->disk_name : "?",
                blk_rq_pos(req), (__force u32)req_op(req),
                blk_op_str(req_op(req)),
                (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
                req->nr_phys_segments,
                IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
        const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
        int total_bytes = blk_rq_bytes(req);
        struct bio *bio = req->bio;

        trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

        if (!bio)
                return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
                req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

        /*
         * Upper layers may call blk_crypto_evict_key() anytime after the last
         * bio_endio().  Therefore, the keyslot must be released before that.
         */
        blk_crypto_rq_put_keyslot(req);

        blk_account_io_completion(req, total_bytes);

        do {
                struct bio *next = bio->bi_next;

                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);

                if (req_op(req) == REQ_OP_ZONE_APPEND)
                        bio->bi_iter.bi_sector = req->__sector;

                if (!is_flush)
                        bio_endio(bio);
                bio = next;
        } while (bio);

        /*
         * Reset counters so that the request stacking driver
         * can find how many bytes remain in the request
         * later.
         */
        if (!req->end_io) {
                req->bio = NULL;
                req->__data_len = 0;
        }
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *	except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
                unsigned int nr_bytes)
{
        int total_bytes;

        trace_block_rq_complete(req, error, nr_bytes);

        if (!req->bio)
                return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
        if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
            error == BLK_STS_OK)
                req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

        /*
         * Upper layers may call blk_crypto_evict_key() anytime after the last
         * bio_endio().  Therefore, the keyslot must be released before that.
         */
        if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
                __blk_crypto_rq_put_keyslot(req);

        if (unlikely(error && !blk_rq_is_passthrough(req) &&
                     !(req->rq_flags & RQF_QUIET)) &&
                     !test_bit(GD_DEAD, &req->q->disk->state)) {
                blk_print_req_error(req, error);
                trace_block_rq_error(req, error, nr_bytes);
        }

        blk_account_io_completion(req, nr_bytes);

        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

                if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;

                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
                req_bio_endio(req, bio, bio_bytes, error);

                total_bytes += bio_bytes;
                nr_bytes -= bio_bytes;

                if (!nr_bytes)
                        break;
        }

        /*
         * completely done
         */
        if (!req->bio) {
                /*
                 * Reset counters so that the request stacking driver
                 * can find how many bytes remain in the request
                 * later.
                 */
                req->__data_len = 0;
                return false;
        }

        req->__data_len -= total_bytes;

        /* update sector only for requests with clear definition of sector */
        if (!blk_rq_is_passthrough(req))
                req->__sector += total_bytes >> 9;

        /* mixed attributes always follow the first bio */
        if (req->rq_flags & RQF_MIXED_MERGE) {
                req->cmd_flags &= ~REQ_FAILFAST_MASK;
                req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
        }

        if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
                /*
                 * If total number of sectors is less than the first segment
                 * size, something has gone terribly wrong.
                 */
                if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
                        blk_dump_rq_flags(req, "request botched");
                        req->__data_len = blk_rq_cur_bytes(req);
                }

                /* recalculate the number of segments */
                req->nr_phys_segments = blk_recalc_rq_segments(req);
        }

        return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static inline void blk_account_io_done(struct request *req, u64 now)
{
        trace_block_io_done(req);

        /*
         * Account IO completion.  flush_rq isn't accounted as a
         * normal IO on queueing nor completion.  Accounting the
         * containing request is enough.
         */
        if (blk_do_io_stat(req) && req->part &&
            !(req->rq_flags & RQF_FLUSH_SEQ)) {
                const int sgrp = op_stat_group(req_op(req));

                part_stat_lock();
                update_io_ticks(req->part, jiffies, true);
                part_stat_inc(req->part, ios[sgrp]);
                part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
                part_stat_unlock();
        }
}

static inline void blk_account_io_start(struct request *req)
{
        trace_block_io_start(req);

        if (blk_do_io_stat(req)) {
                /*
                 * All non-passthrough requests are created from a bio with one
                 * exception: when a flush command that is part of a flush
                 * sequence generated by the state machine in blk-flush.c is
                 * cloned onto the lower device by dm-multipath we can get here
                 * without a bio.
                 */
                if (req->bio)
                        req->part = req->bio->bi_bdev;
                else
                        req->part = req->q->disk->part0;

                part_stat_lock();
                update_io_ticks(req->part, jiffies, false);
                part_stat_unlock();
        }
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
        if (rq->rq_flags & RQF_STATS)
                blk_stat_add(rq, now);

        blk_mq_sched_completed_request(rq, now);
        blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_mq_need_time_stamp(rq))
                __blk_mq_end_request_acct(rq, ktime_get_ns());

        if (rq->end_io) {
                rq_qos_done(rq->q, rq);
                if (rq->end_io(rq, error) == RQ_END_IO_FREE)
                        blk_mq_free_request(rq);
        } else {
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
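/*
 * Illustrative sketch (not part of this file): a driver that can complete a
 * request piecewise (e.g. per scatter element) combines the two helpers the
 * same way blk_mq_end_request() does internally:
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, done_bytes))
 *		__blk_mq_end_request(rq, BLK_STS_OK);	// no bytes left
 *	else
 *		... resubmit or continue with the remainder of rq ...
 *
 * Passing blk_rq_bytes(rq) as done_bytes always takes the first branch.
 */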
#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
                                          int *tag_array, int nr_tags)
{
        struct request_queue *q = hctx->queue;

        /*
         * All requests should have been marked as RQF_MQ_INFLIGHT, so
         * update hctx->nr_active in batch
         */
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_sub_active_requests(hctx, nr_tags);

        blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
        percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
        int tags[TAG_COMP_BATCH], nr_tags = 0;
        struct blk_mq_hw_ctx *cur_hctx = NULL;
        struct request *rq;
        u64 now = 0;

        if (iob->need_ts)
                now = ktime_get_ns();

        while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
                prefetch(rq->bio);
                prefetch(rq->rq_next);

                blk_complete_request(rq);
                if (iob->need_ts)
                        __blk_mq_end_request_acct(rq, now);

                rq_qos_done(rq->q, rq);

                /*
                 * If end_io handler returns NONE, then it still has
                 * ownership of the request.
                 */
                if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
                        continue;

                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
                if (!req_ref_put_and_test(rq))
                        continue;

                blk_crypto_free_request(rq);
                blk_pm_mark_last_busy(rq);

                if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
                        if (cur_hctx)
                                blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
                        nr_tags = 0;
                        cur_hctx = rq->mq_hctx;
                }
                tags[nr_tags++] = rq->tag;
        }

        if (nr_tags)
                blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
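/*
 * Illustrative sketch (not part of this file): poll-based drivers collect
 * finished requests into the io_comp_batch handed to their ->poll() method
 * and let the core retire them in one go:
 *
 *	if (blk_mq_add_to_batch(rq, iob, error, mydrv_batch_complete))
 *		return;		// retired later via blk_mq_end_request_batch()
 *	blk_mq_complete_request(rq);	// fall back to one-at-a-time
 *
 * (mydrv_batch_complete is a hypothetical driver callback.)  Batching
 * amortizes the tag frees and q_usage_counter puts across many completions,
 * which is what blk_mq_flush_tag_batch() above provides.
 */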
static void blk_complete_reqs(struct llist_head *list)
{
        struct llist_node *entry = llist_reverse_order(llist_del_all(list));
        struct request *rq, *next;

        llist_for_each_entry_safe(rq, next, entry, ipi_list)
                rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
        blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
        blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
        return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
        __raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
        int cpu = raw_smp_processor_id();

        if (!IS_ENABLED(CONFIG_SMP) ||
            !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
                return false;
        /*
         * With force threaded interrupts enabled, raising softirq from an SMP
         * function call will always result in waking the ksoftirqd thread.
         * This is probably worse than completing the request on a different
         * cache domain.
         */
        if (force_irqthreads())
                return false;

        /* same CPU or cache domain?  Complete locally */
        if (cpu == rq->mq_ctx->cpu ||
            (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
             cpus_share_cache(cpu, rq->mq_ctx->cpu)))
                return false;

        /* don't try to IPI to an offline CPU */
        return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
        struct llist_head *list;
        unsigned int cpu;

        cpu = rq->mq_ctx->cpu;
        list = &per_cpu(blk_cpu_done, cpu);
        if (llist_add(&rq->ipi_list, list)) {
                INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
                smp_call_function_single_async(cpu, &rq->csd);
        }
}

static void blk_mq_raise_softirq(struct request *rq)
{
        struct llist_head *list;

        preempt_disable();
        list = this_cpu_ptr(&blk_cpu_done);
        if (llist_add(&rq->ipi_list, list))
                raise_softirq(BLOCK_SOFTIRQ);
        preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

        /*
         * For a request whose hctx has only one ctx mapping, or a polled
         * request, always complete it locally; redirecting the completion
         * would be pointless.
         */
        if ((rq->mq_hctx->nr_ctx == 1 &&
             rq->mq_ctx->cpu == raw_smp_processor_id()) ||
             rq->cmd_flags & REQ_POLLED)
                return false;

        if (blk_mq_complete_need_ipi(rq)) {
                blk_mq_complete_send_ipi(rq);
                return true;
        }

        if (rq->q->nr_hw_queues == 1) {
                blk_mq_raise_softirq(rq);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
        if (!blk_mq_complete_request_remote(rq))
                rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);
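/*
 * Illustrative sketch (not part of this file): the usual split in a driver
 * is to call blk_mq_complete_request() from its interrupt handler and do
 * the actual ending work in the ->complete() callback registered in its
 * blk_mq_ops, e.g.:
 *
 *	static void mydrv_complete_rq(struct request *rq)	// hypothetical
 *	{
 *		blk_mq_end_request(rq, mydrv_rq_status(rq));	// hypothetical helper
 *	}
 *
 * This lets the core route the heavy lifting to the CPU (or cache domain)
 * that submitted the request, per blk_mq_complete_need_ipi() above.
 */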
1293 * 1294 * Note: 1295 * This function will invoke @done directly if the queue is dead. 1296 */ 1297 void blk_execute_rq_nowait(struct request *rq, bool at_head) 1298 { 1299 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1300 1301 WARN_ON(irqs_disabled()); 1302 WARN_ON(!blk_rq_is_passthrough(rq)); 1303 1304 blk_account_io_start(rq); 1305 1306 /* 1307 * As plugging can be enabled for passthrough requests on a zoned 1308 * device, directly accessing the plug instead of using blk_mq_plug() 1309 * should not have any consequences. 1310 */ 1311 if (current->plug && !at_head) { 1312 blk_add_rq_to_plug(current->plug, rq); 1313 return; 1314 } 1315 1316 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0); 1317 blk_mq_run_hw_queue(hctx, false); 1318 } 1319 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); 1320 1321 struct blk_rq_wait { 1322 struct completion done; 1323 blk_status_t ret; 1324 }; 1325 1326 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret) 1327 { 1328 struct blk_rq_wait *wait = rq->end_io_data; 1329 1330 wait->ret = ret; 1331 complete(&wait->done); 1332 return RQ_END_IO_NONE; 1333 } 1334 1335 bool blk_rq_is_poll(struct request *rq) 1336 { 1337 if (!rq->mq_hctx) 1338 return false; 1339 if (rq->mq_hctx->type != HCTX_TYPE_POLL) 1340 return false; 1341 return true; 1342 } 1343 EXPORT_SYMBOL_GPL(blk_rq_is_poll); 1344 1345 static void blk_rq_poll_completion(struct request *rq, struct completion *wait) 1346 { 1347 do { 1348 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0); 1349 cond_resched(); 1350 } while (!completion_done(wait)); 1351 } 1352 1353 /** 1354 * blk_execute_rq - insert a request into queue for execution 1355 * @rq: request to insert 1356 * @at_head: insert request at head or tail of queue 1357 * 1358 * Description: 1359 * Insert a fully prepared request at the back of the I/O scheduler queue 1360 * for execution and wait for completion. 1361 * Return: The blk_status_t result provided to blk_mq_end_request(). 1362 */ 1363 blk_status_t blk_execute_rq(struct request *rq, bool at_head) 1364 { 1365 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1366 struct blk_rq_wait wait = { 1367 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done), 1368 }; 1369 1370 WARN_ON(irqs_disabled()); 1371 WARN_ON(!blk_rq_is_passthrough(rq)); 1372 1373 rq->end_io_data = &wait; 1374 rq->end_io = blk_end_sync_rq; 1375 1376 blk_account_io_start(rq); 1377 blk_mq_insert_request(rq, at_head ? 
BLK_MQ_INSERT_AT_HEAD : 0); 1378 blk_mq_run_hw_queue(hctx, false); 1379 1380 if (blk_rq_is_poll(rq)) { 1381 blk_rq_poll_completion(rq, &wait.done); 1382 } else { 1383 /* 1384 * Prevent hang_check timer from firing at us during very long 1385 * I/O 1386 */ 1387 unsigned long hang_check = sysctl_hung_task_timeout_secs; 1388 1389 if (hang_check) 1390 while (!wait_for_completion_io_timeout(&wait.done, 1391 hang_check * (HZ/2))) 1392 ; 1393 else 1394 wait_for_completion_io(&wait.done); 1395 } 1396 1397 return wait.ret; 1398 } 1399 EXPORT_SYMBOL(blk_execute_rq); 1400 1401 static void __blk_mq_requeue_request(struct request *rq) 1402 { 1403 struct request_queue *q = rq->q; 1404 1405 blk_mq_put_driver_tag(rq); 1406 1407 trace_block_rq_requeue(rq); 1408 rq_qos_requeue(q, rq); 1409 1410 if (blk_mq_request_started(rq)) { 1411 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 1412 rq->rq_flags &= ~RQF_TIMED_OUT; 1413 } 1414 } 1415 1416 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) 1417 { 1418 struct request_queue *q = rq->q; 1419 unsigned long flags; 1420 1421 __blk_mq_requeue_request(rq); 1422 1423 /* this request will be re-inserted to io scheduler queue */ 1424 blk_mq_sched_requeue_request(rq); 1425 1426 spin_lock_irqsave(&q->requeue_lock, flags); 1427 list_add_tail(&rq->queuelist, &q->requeue_list); 1428 spin_unlock_irqrestore(&q->requeue_lock, flags); 1429 1430 if (kick_requeue_list) 1431 blk_mq_kick_requeue_list(q); 1432 } 1433 EXPORT_SYMBOL(blk_mq_requeue_request); 1434 1435 static void blk_mq_requeue_work(struct work_struct *work) 1436 { 1437 struct request_queue *q = 1438 container_of(work, struct request_queue, requeue_work.work); 1439 LIST_HEAD(rq_list); 1440 LIST_HEAD(flush_list); 1441 struct request *rq; 1442 1443 spin_lock_irq(&q->requeue_lock); 1444 list_splice_init(&q->requeue_list, &rq_list); 1445 list_splice_init(&q->flush_list, &flush_list); 1446 spin_unlock_irq(&q->requeue_lock); 1447 1448 while (!list_empty(&rq_list)) { 1449 rq = list_entry(rq_list.next, struct request, queuelist); 1450 /* 1451 * If RQF_DONTPREP ist set, the request has been started by the 1452 * driver already and might have driver-specific data allocated 1453 * already. Insert it into the hctx dispatch list to avoid 1454 * block layer merges for the request. 1455 */ 1456 if (rq->rq_flags & RQF_DONTPREP) { 1457 list_del_init(&rq->queuelist); 1458 blk_mq_request_bypass_insert(rq, 0); 1459 } else { 1460 list_del_init(&rq->queuelist); 1461 blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD); 1462 } 1463 } 1464 1465 while (!list_empty(&flush_list)) { 1466 rq = list_entry(flush_list.next, struct request, queuelist); 1467 list_del_init(&rq->queuelist); 1468 blk_mq_insert_request(rq, 0); 1469 } 1470 1471 blk_mq_run_hw_queues(q, false); 1472 } 1473 1474 void blk_mq_kick_requeue_list(struct request_queue *q) 1475 { 1476 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); 1477 } 1478 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 1479 1480 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 1481 unsigned long msecs) 1482 { 1483 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 1484 msecs_to_jiffies(msecs)); 1485 } 1486 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 1487 1488 static bool blk_mq_rq_inflight(struct request *rq, void *priv) 1489 { 1490 /* 1491 * If we find a request that isn't idle we know the queue is busy 1492 * as it's checked in the iter. 1493 * Return false to stop the iteration. 
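/*
 * Illustrative sketch (not part of this file): a driver that hits a
 * transient device condition after blk_mq_start_request() hands the request
 * back instead of failing it:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(q, MYDRV_REQUEUE_DELAY_MS); // hypothetical delay
 *
 * Passing kick_requeue_list=false plus a delayed kick avoids hammering the
 * device while the condition persists.
 */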
static void blk_mq_requeue_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        LIST_HEAD(flush_list);
        struct request *rq;

        spin_lock_irq(&q->requeue_lock);
        list_splice_init(&q->requeue_list, &rq_list);
        list_splice_init(&q->flush_list, &flush_list);
        spin_unlock_irq(&q->requeue_lock);

        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                /*
                 * If RQF_DONTPREP is set, the request has been started by the
                 * driver already and might have driver-specific data allocated
                 * already.  Insert it into the hctx dispatch list to avoid
                 * block layer merges for the request.
                 */
                if (rq->rq_flags & RQF_DONTPREP) {
                        list_del_init(&rq->queuelist);
                        blk_mq_request_bypass_insert(rq, 0);
                } else {
                        list_del_init(&rq->queuelist);
                        blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
                }
        }

        while (!list_empty(&flush_list)) {
                rq = list_entry(flush_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                blk_mq_insert_request(rq, 0);
        }

        blk_mq_run_hw_queues(q, false);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
{
        kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
                                    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
        /*
         * If we find a request that isn't idle we know the queue is busy
         * as it's checked in the iter.
         * Return false to stop the iteration.
         */
        if (blk_mq_request_started(rq)) {
                bool *busy = priv;

                *busy = true;
                return false;
        }

        return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
        bool busy = false;

        blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
        return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req)
{
        req->rq_flags |= RQF_TIMED_OUT;
        if (req->q->mq_ops->timeout) {
                enum blk_eh_timer_return ret;

                ret = req->q->mq_ops->timeout(req);
                if (ret == BLK_EH_DONE)
                        return;
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }

        blk_add_timer(req);
}
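/*
 * Illustrative sketch (not part of this file): a driver's ->timeout()
 * callback chooses between the two return codes handled above:
 *
 *	static enum blk_eh_timer_return mydrv_timeout(struct request *rq)
 *	{					// hypothetical
 *		if (mydrv_try_abort(rq))	// hypothetical helper
 *			return BLK_EH_DONE;	// driver now owns completion
 *		return BLK_EH_RESET_TIMER;	// give the request more time
 *	}
 *
 * BLK_EH_RESET_TIMER re-arms the timer via blk_add_timer(); BLK_EH_DONE
 * means the driver takes responsibility for finishing the request.
 */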
struct blk_expired_data {
        bool has_timedout_rq;
        unsigned long next;
        unsigned long timeout_start;
};

static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
{
        unsigned long deadline;

        if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
                return false;
        if (rq->rq_flags & RQF_TIMED_OUT)
                return false;

        deadline = READ_ONCE(rq->deadline);
        if (time_after_eq(expired->timeout_start, deadline))
                return true;

        if (expired->next == 0)
                expired->next = deadline;
        else if (time_after(expired->next, deadline))
                expired->next = deadline;
        return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
        if (is_flush_rq(rq)) {
                if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
                        blk_mq_free_request(rq);
        } else if (req_ref_put_and_test(rq)) {
                __blk_mq_free_request(rq);
        }
}

static bool blk_mq_check_expired(struct request *rq, void *priv)
{
        struct blk_expired_data *expired = priv;

        /*
         * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
         * be reallocated underneath the timeout handler's processing, and the
         * expire check is therefore reliable. If the request is not expired,
         * then it was completed and reallocated as a new request after
         * returning from blk_mq_check_expired().
         */
        if (blk_mq_req_expired(rq, expired)) {
                expired->has_timedout_rq = true;
                return false;
        }
        return true;
}

static bool blk_mq_handle_expired(struct request *rq, void *priv)
{
        struct blk_expired_data *expired = priv;

        if (blk_mq_req_expired(rq, expired))
                blk_mq_rq_timed_out(rq);
        return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
        struct request_queue *q =
                container_of(work, struct request_queue, timeout_work);
        struct blk_expired_data expired = {
                .timeout_start = jiffies,
        };
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        /* A deadlock might occur if a request is stuck requiring a
         * timeout at the same time a queue freeze is waiting
         * completion, since the timeout code would not be able to
         * acquire the queue reference here.
         *
         * That's why we don't use blk_queue_enter here; instead, we use
         * percpu_ref_tryget directly, because we need to be able to
         * obtain a reference even in the short window between the queue
         * starting to freeze, by dropping the first reference in
         * blk_freeze_queue_start, and the moment the last request is
         * consumed, marked by the instant q_usage_counter reaches
         * zero.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        /* check if there is any timed-out request */
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
        if (expired.has_timedout_rq) {
                /*
                 * Before walking tags, we must ensure any submit started
                 * before the current time has finished. Since the submit
                 * uses srcu or rcu, wait for a synchronization point to
                 * ensure all running submits have finished
                 */
                blk_mq_wait_quiesce_done(q->tag_set);

                expired.next = 0;
                blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
        }

        if (expired.next != 0) {
                mod_timer(&q->timeout, expired.next);
        } else {
                /*
                 * Request timeouts are handled as a forward rolling timer. If
                 * we end up here it means that no requests are pending and
                 * also that no request has been pending for a while. Mark
                 * each hctx as idle.
                 */
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
                                blk_mq_tag_idle(hctx);
                }
        }
        blk_queue_exit(q);
}

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
        enum hctx_type type = hctx->type;

        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
        sbitmap_clear_bit(sb, bitnr);
        spin_unlock(&ctx->lock);
        return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
        struct flush_busy_ctx_data data = {
                .hctx = hctx,
                .list = list,
        };

        sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
                void *data)
{
        struct dispatch_rq_data *dispatch_data = data;
        struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
        enum hctx_type type = hctx->type;

        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_lists[type])) {
                dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
                list_del_init(&dispatch_data->rq->queuelist);
                if (list_empty(&ctx->rq_lists[type]))
                        sbitmap_clear_bit(sb, bitnr);
        }
        spin_unlock(&ctx->lock);

        return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start)
{
        unsigned off = start ? start->index_hw[hctx->type] : 0;
        struct dispatch_rq_data data = {
                .hctx = hctx,
                .rq   = NULL,
        };

        __sbitmap_for_each_set(&hctx->ctx_map, off,
                               dispatch_rq_from_ctx, &data);

        return data.rq;
}

static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
        struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
        unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
        int tag;

        blk_mq_tag_busy(rq->mq_hctx);

        if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
                bt = &rq->mq_hctx->tags->breserved_tags;
                tag_offset = 0;
        } else {
                if (!hctx_may_queue(rq->mq_hctx, bt))
                        return false;
        }

        tag = __sbitmap_queue_get(bt);
        if (tag == BLK_MQ_NO_TAG)
                return false;

        rq->tag = tag + tag_offset;
        return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
                return false;

        if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
            !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
                rq->rq_flags |= RQF_MQ_INFLIGHT;
                __blk_mq_inc_active_requests(hctx);
        }
        hctx->tags->rqs[rq->tag] = rq;
        return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
                int flags, void *key)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

        spin_lock(&hctx->dispatch_wait_lock);
        if (!list_empty(&wait->entry)) {
                struct sbitmap_queue *sbq;

                list_del_init(&wait->entry);
                sbq = &hctx->tags->bitmap_tags;
                atomic_dec(&sbq->ws_active);
        }
        spin_unlock(&hctx->dispatch_wait_lock);

        blk_mq_run_hw_queue(hctx, true);
        return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq)
{
        struct sbitmap_queue *sbq;
        struct wait_queue_head *wq;
        wait_queue_entry_t *wait;
        bool ret;

        if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
            !(blk_mq_is_shared_tags(hctx->flags))) {
                blk_mq_sched_mark_restart_hctx(hctx);

                /*
                 * It's possible that a tag was freed in the window between the
                 * allocation failure and adding the hardware queue to the wait
                 * queue.
                 *
                 * Don't clear RESTART here, someone else could have set it.
                 * At most this will cost an extra queue run.
                 */
                return blk_mq_get_driver_tag(rq);
        }

        wait = &hctx->dispatch_wait;
        if (!list_empty_careful(&wait->entry))
                return false;

        if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
                sbq = &hctx->tags->breserved_tags;
        else
                sbq = &hctx->tags->bitmap_tags;
        wq = &bt_wait_ptr(sbq, hctx)->wait;

        spin_lock_irq(&wq->lock);
        spin_lock(&hctx->dispatch_wait_lock);
        if (!list_empty(&wait->entry)) {
                spin_unlock(&hctx->dispatch_wait_lock);
                spin_unlock_irq(&wq->lock);
                return false;
        }

        atomic_inc(&sbq->ws_active);
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);

        /*
         * It's possible that a tag was freed in the window between the
         * allocation failure and adding the hardware queue to the wait
         * queue.
         */
        ret = blk_mq_get_driver_tag(rq);
        if (!ret) {
                spin_unlock(&hctx->dispatch_wait_lock);
                spin_unlock_irq(&wq->lock);
                return false;
        }

        /*
         * We got a tag, remove ourselves from the wait queue to ensure
         * someone else gets the wakeup.
         */
        list_del_init(&wait->entry);
        atomic_dec(&sbq->ws_active);
        spin_unlock(&hctx->dispatch_wait_lock);
        spin_unlock_irq(&wq->lock);

        return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - weights of 7/8 and 1/8 are applied so that it can decrease exponentially
 * - a factor of 4 is applied so the result doesn't immediately round down to
 *   zero; the exact factor doesn't matter much because the EWMA decreases
 *   exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
        unsigned int ewma;

        ewma = hctx->dispatch_busy;

        if (!ewma && !busy)
                return;

        ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
        if (busy)
                ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
        ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

        hctx->dispatch_busy = ewma;
}
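/*
 * Worked example (illustrative): starting from dispatch_busy == 0, one busy
 * update yields (0 * 7 + 16) / 8 = 2; a second one (2 * 7 + 16) / 8 = 3,
 * converging towards 16.  A non-busy update decays it as 3 * 7 / 8 = 2,
 * so a short burst of "busy" results fades after a few idle dispatches.
 */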
#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
                                       struct list_head *list)
{
        list_add(&rq->queuelist, list);
        __blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
                                        struct list_head *zone_list)
{
        /*
         * If we end up here it is because we cannot dispatch a request to a
         * specific zone due to LLD level zone-write locking or other zone
         * related resource not being available. In this case, set the request
         * aside in zone_list for retrying it later.
         */
        list_add(&rq->queuelist, zone_list);
        __blk_mq_requeue_request(rq);
}

enum prep_dispatch {
        PREP_DISPATCH_OK,
        PREP_DISPATCH_NO_TAG,
        PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
                                                  bool need_budget)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        int budget_token = -1;

        if (need_budget) {
                budget_token = blk_mq_get_dispatch_budget(rq->q);
                if (budget_token < 0) {
                        blk_mq_put_driver_tag(rq);
                        return PREP_DISPATCH_NO_BUDGET;
                }
                blk_mq_set_rq_budget_token(rq, budget_token);
        }

        if (!blk_mq_get_driver_tag(rq)) {
                /*
                 * The initial allocation attempt failed, so we need to
                 * rerun the hardware queue when a tag is freed. The
                 * waitqueue takes care of that. If the queue is run
                 * before we add this entry back on the dispatch list,
                 * we'll re-run it below.
                 */
                if (!blk_mq_mark_tag_wait(hctx, rq)) {
                        /*
                         * All budgets not got from this function will be put
                         * together during handling partial dispatch
                         */
                        if (need_budget)
                                blk_mq_put_dispatch_budget(rq->q, budget_token);
                        return PREP_DISPATCH_NO_TAG;
                }
        }

        return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
                struct list_head *list)
{
        struct request *rq;

        list_for_each_entry(rq, list, queuelist) {
                int budget_token = blk_mq_get_rq_budget_token(rq);

                if (budget_token >= 0)
                        blk_mq_put_dispatch_budget(q, budget_token);
        }
}

/*
 * blk_mq_commit_rqs will notify the driver, using bd->last, that there are
 * no more requests. (See the comment for commit_rqs in struct blk_mq_ops
 * for details.)
 * Note that we need to call this explicitly in two unusual cases:
 * 1) we did not queue everything initially scheduled to queue
 * 2) the last attempt to queue a request failed
 */
static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
                              bool from_schedule)
{
        if (hctx->queue->mq_ops->commit_rqs && queued) {
                trace_block_unplug(hctx->queue, queued, !from_schedule);
                hctx->queue->mq_ops->commit_rqs(hctx);
        }
}
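/*
 * Illustrative sketch (not part of this file): ->commit_rqs() pairs with a
 * driver that defers its doorbell while bd->last == false, e.g.:
 *
 *	static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)  // hypothetical
 *	{
 *		mydrv_ring_doorbell(hctx->driver_data);	// hypothetical helper
 *	}
 *
 * The core calls it in the unusual cases above so that requests queued with
 * bd->last == false are not left sitting in the device's submission queue
 * without notification.
 */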
*/ 2001 queued = 0; 2002 do { 2003 struct blk_mq_queue_data bd; 2004 2005 rq = list_first_entry(list, struct request, queuelist); 2006 2007 WARN_ON_ONCE(hctx != rq->mq_hctx); 2008 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); 2009 if (prep != PREP_DISPATCH_OK) 2010 break; 2011 2012 list_del_init(&rq->queuelist); 2013 2014 bd.rq = rq; 2015 bd.last = list_empty(list); 2016 2017 /* 2018 * once the request is queued to the LLD, the budget no longer 2019 * needs to be covered 2020 */ 2021 if (nr_budgets) 2022 nr_budgets--; 2023 ret = q->mq_ops->queue_rq(hctx, &bd); 2024 switch (ret) { 2025 case BLK_STS_OK: 2026 queued++; 2027 break; 2028 case BLK_STS_RESOURCE: 2029 needs_resource = true; 2030 fallthrough; 2031 case BLK_STS_DEV_RESOURCE: 2032 blk_mq_handle_dev_resource(rq, list); 2033 goto out; 2034 case BLK_STS_ZONE_RESOURCE: 2035 /* 2036 * Move the request to zone_list and keep going through 2037 * the dispatch list to find more requests the drive can 2038 * accept. 2039 */ 2040 blk_mq_handle_zone_resource(rq, &zone_list); 2041 needs_resource = true; 2042 break; 2043 default: 2044 blk_mq_end_request(rq, ret); 2045 } 2046 } while (!list_empty(list)); 2047 out: 2048 if (!list_empty(&zone_list)) 2049 list_splice_tail_init(&zone_list, list); 2050 2051 /* If we didn't flush the entire list, we could have told the driver 2052 * there was more coming, but that turned out to be a lie. 2053 */ 2054 if (!list_empty(list) || ret != BLK_STS_OK) 2055 blk_mq_commit_rqs(hctx, queued, false); 2056 2057 /* 2058 * Any items that need requeuing? Stuff them into hctx->dispatch; 2059 * that is where we will continue on the next queue run. 2060 */ 2061 if (!list_empty(list)) { 2062 bool needs_restart; 2063 /* For non-shared tags, the RESTART check will suffice */ 2064 bool no_tag = prep == PREP_DISPATCH_NO_TAG && 2065 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || 2066 blk_mq_is_shared_tags(hctx->flags)); 2067 2068 if (nr_budgets) 2069 blk_mq_release_budgets(q, list); 2070 2071 spin_lock(&hctx->lock); 2072 list_splice_tail_init(list, &hctx->dispatch); 2073 spin_unlock(&hctx->lock); 2074 2075 /* 2076 * Order adding requests to hctx->dispatch against checking the 2077 * SCHED_RESTART flag. The pair of this smp_mb() is the one 2078 * in blk_mq_sched_restart(). This keeps the restart path from 2079 * missing the newly added requests on hctx->dispatch while 2080 * SCHED_RESTART is observed here. 2081 */ 2082 smp_mb(); 2083 2084 /* 2085 * If SCHED_RESTART was set by the caller of this function and 2086 * it is no longer set that means that it was cleared by another 2087 * thread and hence that a queue rerun is needed. 2088 * 2089 * If 'no_tag' is set, that means that we failed to get 2090 * a driver tag with an I/O scheduler attached. If our dispatch 2091 * waitqueue is no longer active, ensure that we run the queue 2092 * AFTER adding our entries back to the list. 2093 * 2094 * If no I/O scheduler has been configured it is possible that 2095 * the hardware queue got stopped and restarted before requests 2096 * were pushed back onto the dispatch list. Rerun the queue to 2097 * avoid starvation. Notes: 2098 * - blk_mq_run_hw_queue() checks whether or not a queue has 2099 * been stopped before rerunning a queue. 2100 * - Some but not all block drivers stop a queue before 2101 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 2102 * and dm-rq. 2103 * 2104 * If the driver returns BLK_STS_RESOURCE and the SCHED_RESTART 2105 * bit is set, run the queue after a delay to avoid IO stalls 2106 * that could otherwise occur if the queue is idle.
We do the 2107 * same if we couldn't get budget or couldn't lock a zone 2108 * and SCHED_RESTART is set. 2109 */ 2110 needs_restart = blk_mq_sched_needs_restart(hctx); 2111 if (prep == PREP_DISPATCH_NO_BUDGET) 2112 needs_resource = true; 2113 if (!needs_restart || 2114 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 2115 blk_mq_run_hw_queue(hctx, true); 2116 else if (needs_resource) 2117 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 2118 2119 blk_mq_update_dispatch_busy(hctx, true); 2120 return false; 2121 } 2122 2123 blk_mq_update_dispatch_busy(hctx, false); 2124 return true; 2125 } 2126 2127 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) 2128 { 2129 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); 2130 2131 if (cpu >= nr_cpu_ids) 2132 cpu = cpumask_first(hctx->cpumask); 2133 return cpu; 2134 } 2135 2136 /* 2137 * It'd be great if the workqueue API had a way to pass 2138 * in a mask and had some smarts for more clever placement. 2139 * For now we just round-robin here, switching after every 2140 * BLK_MQ_CPU_WORK_BATCH queued items. 2141 */ 2142 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) 2143 { 2144 bool tried = false; 2145 int next_cpu = hctx->next_cpu; 2146 2147 if (hctx->queue->nr_hw_queues == 1) 2148 return WORK_CPU_UNBOUND; 2149 2150 if (--hctx->next_cpu_batch <= 0) { 2151 select_cpu: 2152 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, 2153 cpu_online_mask); 2154 if (next_cpu >= nr_cpu_ids) 2155 next_cpu = blk_mq_first_mapped_cpu(hctx); 2156 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 2157 } 2158 2159 /* 2160 * Schedule unbound if we can't find an online CPU for this hctx; 2161 * this should only happen while handling the CPU DEAD hotplug event. 2162 */ 2163 if (!cpu_online(next_cpu)) { 2164 if (!tried) { 2165 tried = true; 2166 goto select_cpu; 2167 } 2168 2169 /* 2170 * Make sure to re-select the CPU next time, once the CPUs 2171 * in hctx->cpumask come back online. 2172 */ 2173 hctx->next_cpu = next_cpu; 2174 hctx->next_cpu_batch = 1; 2175 return WORK_CPU_UNBOUND; 2176 } 2177 2178 hctx->next_cpu = next_cpu; 2179 return next_cpu; 2180 } 2181 2182 /** 2183 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. 2184 * @hctx: Pointer to the hardware queue to run. 2185 * @msecs: Milliseconds of delay to wait before running the queue. 2186 * 2187 * Run a hardware queue asynchronously with a delay of @msecs. 2188 */ 2189 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) 2190 { 2191 if (unlikely(blk_mq_hctx_stopped(hctx))) 2192 return; 2193 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, 2194 msecs_to_jiffies(msecs)); 2195 } 2196 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); 2197 2198 /** 2199 * blk_mq_run_hw_queue - Start to run a hardware queue. 2200 * @hctx: Pointer to the hardware queue to run. 2201 * @async: If we want to run the queue asynchronously. 2202 * 2203 * Check if the request queue is not in a quiesced state and if there are 2204 * pending requests to be sent. If this is true, run the queue to send requests 2205 * to hardware. 2206 */ 2207 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2208 { 2209 bool need_run; 2210 2211 /* 2212 * We can't run the queue inline with interrupts disabled.
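 * Dispatch may sleep (e.g. a BLK_MQ_F_BLOCKING driver's ->queue_rq()),
 * so callers in hard-irq context must pass async=true and let kblockd
 * do the actual run, as the check below enforces. For example, from a
 * completion handler:
 *
 *	blk_mq_run_hw_queue(hctx, true);	(async is mandatory here)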
*/ 2214 WARN_ON_ONCE(!async && in_interrupt()); 2215 2216 /* 2217 * While the queue is quiesced we may be switching the io scheduler, 2218 * updating nr_hw_queues, or similar; the queue can't be run any more, 2219 * and even __blk_mq_hctx_has_pending() can't be called safely. 2220 * 2221 * The queue will be rerun in blk_mq_unquiesce_queue() if it is 2222 * quiesced. 2223 */ 2224 __blk_mq_run_dispatch_ops(hctx->queue, false, 2225 need_run = !blk_queue_quiesced(hctx->queue) && 2226 blk_mq_hctx_has_pending(hctx)); 2227 2228 if (!need_run) 2229 return; 2230 2231 if (async || (hctx->flags & BLK_MQ_F_BLOCKING) || 2232 !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { 2233 blk_mq_delay_run_hw_queue(hctx, 0); 2234 return; 2235 } 2236 2237 blk_mq_run_dispatch_ops(hctx->queue, 2238 blk_mq_sched_dispatch_requests(hctx)); 2239 } 2240 EXPORT_SYMBOL(blk_mq_run_hw_queue); 2241 2242 /* 2243 * Return the preferred queue to dispatch from (if any) for a non-mq-aware 2244 * IO scheduler. 2245 */ 2246 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2247 { 2248 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 2249 /* 2250 * If the IO scheduler does not respect hardware queues when 2251 * dispatching, we just don't bother with multiple HW queues and 2252 * dispatch from hctx for the current CPU since running multiple queues 2253 * just causes lock contention inside the scheduler and pointless cache 2254 * bouncing. 2255 */ 2256 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; 2257 2258 if (!blk_mq_hctx_stopped(hctx)) 2259 return hctx; 2260 return NULL; 2261 } 2262 2263 /** 2264 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2265 * @q: Pointer to the request queue to run. 2266 * @async: If we want to run the queue asynchronously. 2267 */ 2268 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2269 { 2270 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2271 unsigned long i; 2272 2273 sq_hctx = NULL; 2274 if (blk_queue_sq_sched(q)) 2275 sq_hctx = blk_mq_get_sq_hctx(q); 2276 queue_for_each_hw_ctx(q, hctx, i) { 2277 if (blk_mq_hctx_stopped(hctx)) 2278 continue; 2279 /* 2280 * Dispatch from this hctx either if there's no hctx preferred 2281 * by IO scheduler or if it has requests that bypass the 2282 * scheduler. 2283 */ 2284 if (!sq_hctx || sq_hctx == hctx || 2285 !list_empty_careful(&hctx->dispatch)) 2286 blk_mq_run_hw_queue(hctx, async); 2287 } 2288 } 2289 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2290 2291 /** 2292 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2293 * @q: Pointer to the request queue to run. 2294 * @msecs: Milliseconds of delay to wait before running the queues. 2295 */ 2296 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2297 { 2298 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2299 unsigned long i; 2300 2301 sq_hctx = NULL; 2302 if (blk_queue_sq_sched(q)) 2303 sq_hctx = blk_mq_get_sq_hctx(q); 2304 queue_for_each_hw_ctx(q, hctx, i) { 2305 if (blk_mq_hctx_stopped(hctx)) 2306 continue; 2307 /* 2308 * If there is already a run_work pending, leave the 2309 * pending delay untouched. Otherwise, a hctx can stall 2310 * if another hctx is re-delaying the other's work 2311 * before the work executes. 2312 */ 2313 if (delayed_work_pending(&hctx->run_work)) 2314 continue; 2315 /* 2316 * Dispatch from this hctx either if there's no hctx preferred 2317 * by IO scheduler or if it has requests that bypass the 2318 * scheduler.
2319 */ 2320 if (!sq_hctx || sq_hctx == hctx || 2321 !list_empty_careful(&hctx->dispatch)) 2322 blk_mq_delay_run_hw_queue(hctx, msecs); 2323 } 2324 } 2325 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2326 2327 /* 2328 * This function is often used for pausing .queue_rq() by driver when 2329 * there isn't enough resource or some conditions aren't satisfied, and 2330 * BLK_STS_RESOURCE is usually returned. 2331 * 2332 * We do not guarantee that dispatch can be drained or blocked 2333 * after blk_mq_stop_hw_queue() returns. Please use 2334 * blk_mq_quiesce_queue() for that requirement. 2335 */ 2336 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2337 { 2338 cancel_delayed_work(&hctx->run_work); 2339 2340 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2341 } 2342 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2343 2344 /* 2345 * This function is often used for pausing .queue_rq() by driver when 2346 * there isn't enough resource or some conditions aren't satisfied, and 2347 * BLK_STS_RESOURCE is usually returned. 2348 * 2349 * We do not guarantee that dispatch can be drained or blocked 2350 * after blk_mq_stop_hw_queues() returns. Please use 2351 * blk_mq_quiesce_queue() for that requirement. 2352 */ 2353 void blk_mq_stop_hw_queues(struct request_queue *q) 2354 { 2355 struct blk_mq_hw_ctx *hctx; 2356 unsigned long i; 2357 2358 queue_for_each_hw_ctx(q, hctx, i) 2359 blk_mq_stop_hw_queue(hctx); 2360 } 2361 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2362 2363 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2364 { 2365 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2366 2367 blk_mq_run_hw_queue(hctx, false); 2368 } 2369 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2370 2371 void blk_mq_start_hw_queues(struct request_queue *q) 2372 { 2373 struct blk_mq_hw_ctx *hctx; 2374 unsigned long i; 2375 2376 queue_for_each_hw_ctx(q, hctx, i) 2377 blk_mq_start_hw_queue(hctx); 2378 } 2379 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2380 2381 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2382 { 2383 if (!blk_mq_hctx_stopped(hctx)) 2384 return; 2385 2386 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2387 blk_mq_run_hw_queue(hctx, async); 2388 } 2389 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2390 2391 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2392 { 2393 struct blk_mq_hw_ctx *hctx; 2394 unsigned long i; 2395 2396 queue_for_each_hw_ctx(q, hctx, i) 2397 blk_mq_start_stopped_hw_queue(hctx, async); 2398 } 2399 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2400 2401 static void blk_mq_run_work_fn(struct work_struct *work) 2402 { 2403 struct blk_mq_hw_ctx *hctx = 2404 container_of(work, struct blk_mq_hw_ctx, run_work.work); 2405 2406 blk_mq_run_dispatch_ops(hctx->queue, 2407 blk_mq_sched_dispatch_requests(hctx)); 2408 } 2409 2410 /** 2411 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2412 * @rq: Pointer to request to be inserted. 2413 * @flags: BLK_MQ_INSERT_* 2414 * 2415 * Should only be used carefully, when the caller knows we want to 2416 * bypass a potential IO scheduler on the target device. 
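 *
 * A typical pattern, also used elsewhere in this file, is to bypass-insert
 * and then kick the queue so the request actually gets dispatched, e.g.:
 *
 *	blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
 *	blk_mq_run_hw_queue(rq->mq_hctx, false);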
*/ 2418 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) 2419 { 2420 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2421 2422 spin_lock(&hctx->lock); 2423 if (flags & BLK_MQ_INSERT_AT_HEAD) 2424 list_add(&rq->queuelist, &hctx->dispatch); 2425 else 2426 list_add_tail(&rq->queuelist, &hctx->dispatch); 2427 spin_unlock(&hctx->lock); 2428 } 2429 2430 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, 2431 struct blk_mq_ctx *ctx, struct list_head *list, 2432 bool run_queue_async) 2433 { 2434 struct request *rq; 2435 enum hctx_type type = hctx->type; 2436 2437 /* 2438 * Try to issue requests directly if the hw queue isn't busy to save an 2439 * extra enqueue & dequeue to the sw queue. 2440 */ 2441 if (!hctx->dispatch_busy && !run_queue_async) { 2442 blk_mq_run_dispatch_ops(hctx->queue, 2443 blk_mq_try_issue_list_directly(hctx, list)); 2444 if (list_empty(list)) 2445 goto out; 2446 } 2447 2448 /* 2449 * Preemption doesn't flush the plug list, so it's possible that 2450 * ctx->cpu is offline now 2451 */ 2452 list_for_each_entry(rq, list, queuelist) { 2453 BUG_ON(rq->mq_ctx != ctx); 2454 trace_block_rq_insert(rq); 2455 } 2456 2457 spin_lock(&ctx->lock); 2458 list_splice_tail_init(list, &ctx->rq_lists[type]); 2459 blk_mq_hctx_mark_pending(hctx, ctx); 2460 spin_unlock(&ctx->lock); 2461 out: 2462 blk_mq_run_hw_queue(hctx, run_queue_async); 2463 } 2464 2465 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags) 2466 { 2467 struct request_queue *q = rq->q; 2468 struct blk_mq_ctx *ctx = rq->mq_ctx; 2469 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2470 2471 if (blk_rq_is_passthrough(rq)) { 2472 /* 2473 * Passthrough requests have to be added to hctx->dispatch 2474 * directly. The device may be in a state where it can't 2475 * handle FS requests, and always returns BLK_STS_RESOURCE for 2476 * them, which gets them added to hctx->dispatch. 2477 * 2478 * If a passthrough request is required to unblock the queues 2479 * and it is added to the scheduler queue instead, there is no 2480 * chance to dispatch it, given that we prioritize requests in 2481 * hctx->dispatch. 2482 */ 2483 blk_mq_request_bypass_insert(rq, flags); 2484 } else if (req_op(rq) == REQ_OP_FLUSH) { 2485 /* 2486 * First, normal IO requests are inserted into the scheduler 2487 * queue or the sw queue, while we add the flush request to the 2488 * dispatch queue (hctx->dispatch) directly. There is at most 2489 * one in-flight flush request per hw queue, so it makes little 2490 * difference whether the flush request is added to the tail or 2491 * the front of the dispatch queue. 2492 * 2493 * Second, with NCQ a flush request is a non-NCQ command, and 2494 * queueing it fails while any normal IO request (an NCQ 2495 * command) is in flight. Adding the flush rq to the front of 2496 * hctx->dispatch adds a little latency to the flush rq (because 2497 * of SCHED_RESTART) compared with adding it to the tail of the 2498 * dispatch queue; this increases the chance of flush merging, 2499 * so fewer flush requests are issued to the controller. It is 2500 * observed that ~10% of the time is saved in blktests block/004 2501 * on a disk attached to an AHCI/NCQ drive when the flush rq is 2502 * added to the front of hctx->dispatch. Simply queue the flush 2503 * rq at the front so that flush-intensive workloads can benefit on NCQ HW.
*/ 2505 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD); 2506 } else if (q->elevator) { 2507 LIST_HEAD(list); 2508 2509 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG); 2510 2511 list_add(&rq->queuelist, &list); 2512 q->elevator->type->ops.insert_requests(hctx, &list, flags); 2513 } else { 2514 trace_block_rq_insert(rq); 2515 2516 spin_lock(&ctx->lock); 2517 if (flags & BLK_MQ_INSERT_AT_HEAD) 2518 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); 2519 else 2520 list_add_tail(&rq->queuelist, 2521 &ctx->rq_lists[hctx->type]); 2522 blk_mq_hctx_mark_pending(hctx, ctx); 2523 spin_unlock(&ctx->lock); 2524 } 2525 } 2526 2527 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2528 unsigned int nr_segs) 2529 { 2530 int err; 2531 2532 if (bio->bi_opf & REQ_RAHEAD) 2533 rq->cmd_flags |= REQ_FAILFAST_MASK; 2534 2535 rq->__sector = bio->bi_iter.bi_sector; 2536 blk_rq_bio_prep(rq, bio, nr_segs); 2537 2538 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2539 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2540 WARN_ON_ONCE(err); 2541 2542 blk_account_io_start(rq); 2543 } 2544 2545 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2546 struct request *rq, bool last) 2547 { 2548 struct request_queue *q = rq->q; 2549 struct blk_mq_queue_data bd = { 2550 .rq = rq, 2551 .last = last, 2552 }; 2553 blk_status_t ret; 2554 2555 /* 2556 * If the queue is OK, we are done. On error, the caller may kill the 2557 * request. For any other error (busy), just add it to our list as we 2558 * previously would have done. 2559 */ 2560 ret = q->mq_ops->queue_rq(hctx, &bd); 2561 switch (ret) { 2562 case BLK_STS_OK: 2563 blk_mq_update_dispatch_busy(hctx, false); 2564 break; 2565 case BLK_STS_RESOURCE: 2566 case BLK_STS_DEV_RESOURCE: 2567 blk_mq_update_dispatch_busy(hctx, true); 2568 __blk_mq_requeue_request(rq); 2569 break; 2570 default: 2571 blk_mq_update_dispatch_busy(hctx, false); 2572 break; 2573 } 2574 2575 return ret; 2576 } 2577 2578 static bool blk_mq_get_budget_and_tag(struct request *rq) 2579 { 2580 int budget_token; 2581 2582 budget_token = blk_mq_get_dispatch_budget(rq->q); 2583 if (budget_token < 0) 2584 return false; 2585 blk_mq_set_rq_budget_token(rq, budget_token); 2586 if (!blk_mq_get_driver_tag(rq)) { 2587 blk_mq_put_dispatch_budget(rq->q, budget_token); 2588 return false; 2589 } 2590 return true; 2591 } 2592 2593 /** 2594 * blk_mq_try_issue_directly - Try to send a request directly to the device driver. 2595 * @hctx: Pointer of the associated hardware queue. 2596 * @rq: Pointer to request to be sent. 2597 * 2598 * If the device has enough resources to accept a new request now, send the 2599 * request directly to the device driver. Else, insert it at the hctx->dispatch 2600 * queue, so we can try to send it again in the future. Requests inserted at this 2601 * queue have higher priority.
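 *
 * In short, the logic below is:
 *  - stopped or quiesced queue: insert the request and leave it for a
 *    later queue run
 *  - RQF_USE_SCHED set, or no budget/driver tag available: insert the
 *    request and kick the queue
 *  - otherwise: issue via ->queue_rq(), falling back to the dispatch
 *    list on BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE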
2602 */ 2603 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2604 struct request *rq) 2605 { 2606 blk_status_t ret; 2607 2608 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { 2609 blk_mq_insert_request(rq, 0); 2610 return; 2611 } 2612 2613 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) { 2614 blk_mq_insert_request(rq, 0); 2615 blk_mq_run_hw_queue(hctx, false); 2616 return; 2617 } 2618 2619 ret = __blk_mq_issue_directly(hctx, rq, true); 2620 switch (ret) { 2621 case BLK_STS_OK: 2622 break; 2623 case BLK_STS_RESOURCE: 2624 case BLK_STS_DEV_RESOURCE: 2625 blk_mq_request_bypass_insert(rq, 0); 2626 blk_mq_run_hw_queue(hctx, false); 2627 break; 2628 default: 2629 blk_mq_end_request(rq, ret); 2630 break; 2631 } 2632 } 2633 2634 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2635 { 2636 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2637 2638 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { 2639 blk_mq_insert_request(rq, 0); 2640 return BLK_STS_OK; 2641 } 2642 2643 if (!blk_mq_get_budget_and_tag(rq)) 2644 return BLK_STS_RESOURCE; 2645 return __blk_mq_issue_directly(hctx, rq, last); 2646 } 2647 2648 static void blk_mq_plug_issue_direct(struct blk_plug *plug) 2649 { 2650 struct blk_mq_hw_ctx *hctx = NULL; 2651 struct request *rq; 2652 int queued = 0; 2653 blk_status_t ret = BLK_STS_OK; 2654 2655 while ((rq = rq_list_pop(&plug->mq_list))) { 2656 bool last = rq_list_empty(plug->mq_list); 2657 2658 if (hctx != rq->mq_hctx) { 2659 if (hctx) { 2660 blk_mq_commit_rqs(hctx, queued, false); 2661 queued = 0; 2662 } 2663 hctx = rq->mq_hctx; 2664 } 2665 2666 ret = blk_mq_request_issue_directly(rq, last); 2667 switch (ret) { 2668 case BLK_STS_OK: 2669 queued++; 2670 break; 2671 case BLK_STS_RESOURCE: 2672 case BLK_STS_DEV_RESOURCE: 2673 blk_mq_request_bypass_insert(rq, 0); 2674 blk_mq_run_hw_queue(hctx, false); 2675 goto out; 2676 default: 2677 blk_mq_end_request(rq, ret); 2678 break; 2679 } 2680 } 2681 2682 out: 2683 if (ret != BLK_STS_OK) 2684 blk_mq_commit_rqs(hctx, queued, false); 2685 } 2686 2687 static void __blk_mq_flush_plug_list(struct request_queue *q, 2688 struct blk_plug *plug) 2689 { 2690 if (blk_queue_quiesced(q)) 2691 return; 2692 q->mq_ops->queue_rqs(&plug->mq_list); 2693 } 2694 2695 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2696 { 2697 struct blk_mq_hw_ctx *this_hctx = NULL; 2698 struct blk_mq_ctx *this_ctx = NULL; 2699 struct request *requeue_list = NULL; 2700 struct request **requeue_lastp = &requeue_list; 2701 unsigned int depth = 0; 2702 bool is_passthrough = false; 2703 LIST_HEAD(list); 2704 2705 do { 2706 struct request *rq = rq_list_pop(&plug->mq_list); 2707 2708 if (!this_hctx) { 2709 this_hctx = rq->mq_hctx; 2710 this_ctx = rq->mq_ctx; 2711 is_passthrough = blk_rq_is_passthrough(rq); 2712 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx || 2713 is_passthrough != blk_rq_is_passthrough(rq)) { 2714 rq_list_add_tail(&requeue_lastp, rq); 2715 continue; 2716 } 2717 list_add(&rq->queuelist, &list); 2718 depth++; 2719 } while (!rq_list_empty(plug->mq_list)); 2720 2721 plug->mq_list = requeue_list; 2722 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2723 2724 percpu_ref_get(&this_hctx->queue->q_usage_counter); 2725 /* passthrough requests should never be issued to the I/O scheduler */ 2726 if (is_passthrough) { 2727 spin_lock(&this_hctx->lock); 2728 list_splice_tail_init(&list, &this_hctx->dispatch); 2729 spin_unlock(&this_hctx->lock); 2730 
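		/*
		 * The requests are now visible on hctx->dispatch; kick the
		 * hardware queue so they actually get issued.
		 */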
blk_mq_run_hw_queue(this_hctx, from_sched); 2731 } else if (this_hctx->queue->elevator) { 2732 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx, 2733 &list, 0); 2734 blk_mq_run_hw_queue(this_hctx, from_sched); 2735 } else { 2736 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched); 2737 } 2738 percpu_ref_put(&this_hctx->queue->q_usage_counter); 2739 } 2740 2741 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2742 { 2743 struct request *rq; 2744 2745 if (rq_list_empty(plug->mq_list)) 2746 return; 2747 plug->rq_count = 0; 2748 2749 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2750 struct request_queue *q; 2751 2752 rq = rq_list_peek(&plug->mq_list); 2753 q = rq->q; 2754 2755 /* 2756 * Peek first request and see if we have a ->queue_rqs() hook. 2757 * If we do, we can dispatch the whole plug list in one go. We 2758 * already know at this point that all requests belong to the 2759 * same queue, caller must ensure that's the case. 2760 * 2761 * Since we pass off the full list to the driver at this point, 2762 * we do not increment the active request count for the queue. 2763 * Bypass shared tags for now because of that. 2764 */ 2765 if (q->mq_ops->queue_rqs && 2766 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 2767 blk_mq_run_dispatch_ops(q, 2768 __blk_mq_flush_plug_list(q, plug)); 2769 if (rq_list_empty(plug->mq_list)) 2770 return; 2771 } 2772 2773 blk_mq_run_dispatch_ops(q, 2774 blk_mq_plug_issue_direct(plug)); 2775 if (rq_list_empty(plug->mq_list)) 2776 return; 2777 } 2778 2779 do { 2780 blk_mq_dispatch_plug_list(plug, from_schedule); 2781 } while (!rq_list_empty(plug->mq_list)); 2782 } 2783 2784 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2785 struct list_head *list) 2786 { 2787 int queued = 0; 2788 blk_status_t ret = BLK_STS_OK; 2789 2790 while (!list_empty(list)) { 2791 struct request *rq = list_first_entry(list, struct request, 2792 queuelist); 2793 2794 list_del_init(&rq->queuelist); 2795 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2796 switch (ret) { 2797 case BLK_STS_OK: 2798 queued++; 2799 break; 2800 case BLK_STS_RESOURCE: 2801 case BLK_STS_DEV_RESOURCE: 2802 blk_mq_request_bypass_insert(rq, 0); 2803 if (list_empty(list)) 2804 blk_mq_run_hw_queue(hctx, false); 2805 goto out; 2806 default: 2807 blk_mq_end_request(rq, ret); 2808 break; 2809 } 2810 } 2811 2812 out: 2813 if (ret != BLK_STS_OK) 2814 blk_mq_commit_rqs(hctx, queued, false); 2815 } 2816 2817 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2818 struct bio *bio, unsigned int nr_segs) 2819 { 2820 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2821 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2822 return true; 2823 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2824 return true; 2825 } 2826 return false; 2827 } 2828 2829 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2830 struct blk_plug *plug, 2831 struct bio *bio, 2832 unsigned int nsegs) 2833 { 2834 struct blk_mq_alloc_data data = { 2835 .q = q, 2836 .nr_tags = 1, 2837 .cmd_flags = bio->bi_opf, 2838 }; 2839 struct request *rq; 2840 2841 if (unlikely(bio_queue_enter(bio))) 2842 return NULL; 2843 2844 if (blk_mq_attempt_bio_merge(q, bio, nsegs)) 2845 goto queue_exit; 2846 2847 rq_qos_throttle(q, bio); 2848 2849 if (plug) { 2850 data.nr_tags = plug->nr_ios; 2851 plug->nr_ios = 1; 2852 data.cached_rq = &plug->cached_rq; 2853 } 2854 2855 rq = __blk_mq_alloc_requests(&data); 2856 if (rq) 2857 return rq; 2858 
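	/*
	 * Allocation failed: unwind the qos throttle accounting taken above
	 * and, for REQ_NOWAIT bios, complete the bio with BLK_STS_AGAIN.
	 */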
rq_qos_cleanup(q, bio); 2859 if (bio->bi_opf & REQ_NOWAIT) 2860 bio_wouldblock_error(bio); 2861 queue_exit: 2862 blk_queue_exit(q); 2863 return NULL; 2864 } 2865 2866 static inline struct request *blk_mq_get_cached_request(struct request_queue *q, 2867 struct blk_plug *plug, struct bio **bio, unsigned int nsegs) 2868 { 2869 struct request *rq; 2870 enum hctx_type type, hctx_type; 2871 2872 if (!plug) 2873 return NULL; 2874 rq = rq_list_peek(&plug->cached_rq); 2875 if (!rq || rq->q != q) 2876 return NULL; 2877 2878 if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { 2879 *bio = NULL; 2880 return NULL; 2881 } 2882 2883 type = blk_mq_get_hctx_type((*bio)->bi_opf); 2884 hctx_type = rq->mq_hctx->type; 2885 if (type != hctx_type && 2886 !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT)) 2887 return NULL; 2888 if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) 2889 return NULL; 2890 2891 /* 2892 * If any qos ->throttle() end up blocking, we will have flushed the 2893 * plug and hence killed the cached_rq list as well. Pop this entry 2894 * before we throttle. 2895 */ 2896 plug->cached_rq = rq_list_next(rq); 2897 rq_qos_throttle(q, *bio); 2898 2899 rq->cmd_flags = (*bio)->bi_opf; 2900 INIT_LIST_HEAD(&rq->queuelist); 2901 return rq; 2902 } 2903 2904 static void bio_set_ioprio(struct bio *bio) 2905 { 2906 /* Nobody set ioprio so far? Initialize it based on task's nice value */ 2907 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) 2908 bio->bi_ioprio = get_current_ioprio(); 2909 blkcg_set_ioprio(bio); 2910 } 2911 2912 /** 2913 * blk_mq_submit_bio - Create and send a request to block device. 2914 * @bio: Bio pointer. 2915 * 2916 * Builds up a request structure from @q and @bio and send to the device. The 2917 * request may not be queued directly to hardware if: 2918 * * This request can be merged with another one 2919 * * We want to place request at plug queue for possible future merging 2920 * * There is an IO scheduler active at this queue 2921 * 2922 * It will not queue the request if there is an error with the bio, or at the 2923 * request creation. 
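 *
 * Callers that want batching typically wrap submission in a plug, e.g.
 * (illustrative usage, not from this file):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio);		(ends up here via submit_bio_noacct())
 *	blk_finish_plug(&plug);
 *
 * in which case the request is parked on plug->mq_list and issued in a
 * batch from blk_mq_flush_plug_list().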
2924 */ 2925 void blk_mq_submit_bio(struct bio *bio) 2926 { 2927 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2928 struct blk_plug *plug = blk_mq_plug(bio); 2929 const int is_sync = op_is_sync(bio->bi_opf); 2930 struct blk_mq_hw_ctx *hctx; 2931 struct request *rq; 2932 unsigned int nr_segs = 1; 2933 blk_status_t ret; 2934 2935 bio = blk_queue_bounce(bio, q); 2936 if (bio_may_exceed_limits(bio, &q->limits)) { 2937 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); 2938 if (!bio) 2939 return; 2940 } 2941 2942 if (!bio_integrity_prep(bio)) 2943 return; 2944 2945 bio_set_ioprio(bio); 2946 2947 rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); 2948 if (!rq) { 2949 if (!bio) 2950 return; 2951 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 2952 if (unlikely(!rq)) 2953 return; 2954 } 2955 2956 trace_block_getrq(bio); 2957 2958 rq_qos_track(q, rq, bio); 2959 2960 blk_mq_bio_to_request(rq, bio, nr_segs); 2961 2962 ret = blk_crypto_rq_get_keyslot(rq); 2963 if (ret != BLK_STS_OK) { 2964 bio->bi_status = ret; 2965 bio_endio(bio); 2966 blk_mq_free_request(rq); 2967 return; 2968 } 2969 2970 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) 2971 return; 2972 2973 if (plug) { 2974 blk_add_rq_to_plug(plug, rq); 2975 return; 2976 } 2977 2978 hctx = rq->mq_hctx; 2979 if ((rq->rq_flags & RQF_USE_SCHED) || 2980 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { 2981 blk_mq_insert_request(rq, 0); 2982 blk_mq_run_hw_queue(hctx, true); 2983 } else { 2984 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); 2985 } 2986 } 2987 2988 #ifdef CONFIG_BLK_MQ_STACKING 2989 /** 2990 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2991 * @rq: the request being queued 2992 */ 2993 blk_status_t blk_insert_cloned_request(struct request *rq) 2994 { 2995 struct request_queue *q = rq->q; 2996 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); 2997 unsigned int max_segments = blk_rq_get_max_segments(rq); 2998 blk_status_t ret; 2999 3000 if (blk_rq_sectors(rq) > max_sectors) { 3001 /* 3002 * SCSI device does not have a good way to return if 3003 * Write Same/Zero is actually supported. If a device rejects 3004 * a non-read/write command (discard, write same,etc.) the 3005 * low-level device driver will set the relevant queue limit to 3006 * 0 to prevent blk-lib from issuing more of the offending 3007 * operations. Commands queued prior to the queue limit being 3008 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 3009 * errors being propagated to upper layers. 3010 */ 3011 if (max_sectors == 0) 3012 return BLK_STS_NOTSUPP; 3013 3014 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", 3015 __func__, blk_rq_sectors(rq), max_sectors); 3016 return BLK_STS_IOERR; 3017 } 3018 3019 /* 3020 * The queue settings related to segment counting may differ from the 3021 * original queue. 3022 */ 3023 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 3024 if (rq->nr_phys_segments > max_segments) { 3025 printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n", 3026 __func__, rq->nr_phys_segments, max_segments); 3027 return BLK_STS_IOERR; 3028 } 3029 3030 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 3031 return BLK_STS_IOERR; 3032 3033 ret = blk_crypto_rq_get_keyslot(rq); 3034 if (ret != BLK_STS_OK) 3035 return ret; 3036 3037 blk_account_io_start(rq); 3038 3039 /* 3040 * Since we have a scheduler attached on the top device, 3041 * bypass a potential scheduler on the bottom device for 3042 * insert. 
3043 */ 3044 blk_mq_run_dispatch_ops(q, 3045 ret = blk_mq_request_issue_directly(rq, true)); 3046 if (ret) 3047 blk_account_io_done(rq, ktime_get_ns()); 3048 return ret; 3049 } 3050 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 3051 3052 /** 3053 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3054 * @rq: the clone request to be cleaned up 3055 * 3056 * Description: 3057 * Free all bios in @rq for a cloned request. 3058 */ 3059 void blk_rq_unprep_clone(struct request *rq) 3060 { 3061 struct bio *bio; 3062 3063 while ((bio = rq->bio) != NULL) { 3064 rq->bio = bio->bi_next; 3065 3066 bio_put(bio); 3067 } 3068 } 3069 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3070 3071 /** 3072 * blk_rq_prep_clone - Helper function to setup clone request 3073 * @rq: the request to be setup 3074 * @rq_src: original request to be cloned 3075 * @bs: bio_set that bios for clone are allocated from 3076 * @gfp_mask: memory allocation mask for bio 3077 * @bio_ctr: setup function to be called for each clone bio. 3078 * Returns %0 for success, non %0 for failure. 3079 * @data: private data to be passed to @bio_ctr 3080 * 3081 * Description: 3082 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3083 * Also, pages which the original bios are pointing to are not copied 3084 * and the cloned bios just point same pages. 3085 * So cloned bios must be completed before original bios, which means 3086 * the caller must complete @rq before @rq_src. 3087 */ 3088 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3089 struct bio_set *bs, gfp_t gfp_mask, 3090 int (*bio_ctr)(struct bio *, struct bio *, void *), 3091 void *data) 3092 { 3093 struct bio *bio, *bio_src; 3094 3095 if (!bs) 3096 bs = &fs_bio_set; 3097 3098 __rq_for_each_bio(bio_src, rq_src) { 3099 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, 3100 bs); 3101 if (!bio) 3102 goto free_and_out; 3103 3104 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3105 goto free_and_out; 3106 3107 if (rq->bio) { 3108 rq->biotail->bi_next = bio; 3109 rq->biotail = bio; 3110 } else { 3111 rq->bio = rq->biotail = bio; 3112 } 3113 bio = NULL; 3114 } 3115 3116 /* Copy attributes of the original request to the clone request. */ 3117 rq->__sector = blk_rq_pos(rq_src); 3118 rq->__data_len = blk_rq_bytes(rq_src); 3119 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 3120 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 3121 rq->special_vec = rq_src->special_vec; 3122 } 3123 rq->nr_phys_segments = rq_src->nr_phys_segments; 3124 rq->ioprio = rq_src->ioprio; 3125 3126 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 3127 goto free_and_out; 3128 3129 return 0; 3130 3131 free_and_out: 3132 if (bio) 3133 bio_put(bio); 3134 blk_rq_unprep_clone(rq); 3135 3136 return -ENOMEM; 3137 } 3138 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3139 #endif /* CONFIG_BLK_MQ_STACKING */ 3140 3141 /* 3142 * Steal bios from a request and add them to a bio list. 3143 * The request must not have been partially completed before. 
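 *
 * This is meant for stacking drivers that retry the bios on another
 * path; a sketch of such a user (hypothetical, mirroring what multipath
 * failover does):
 *
 *	struct bio_list bios;
 *	struct bio *bio;
 *
 *	bio_list_init(&bios);
 *	blk_steal_bios(&bios, rq);
 *	blk_mq_end_request(rq, BLK_STS_OK);
 *	while ((bio = bio_list_pop(&bios)))
 *		submit_bio_noacct(bio);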
3144 */ 3145 void blk_steal_bios(struct bio_list *list, struct request *rq) 3146 { 3147 if (rq->bio) { 3148 if (list->tail) 3149 list->tail->bi_next = rq->bio; 3150 else 3151 list->head = rq->bio; 3152 list->tail = rq->biotail; 3153 3154 rq->bio = NULL; 3155 rq->biotail = NULL; 3156 } 3157 3158 rq->__data_len = 0; 3159 } 3160 EXPORT_SYMBOL_GPL(blk_steal_bios); 3161 3162 static size_t order_to_size(unsigned int order) 3163 { 3164 return (size_t)PAGE_SIZE << order; 3165 } 3166 3167 /* called before freeing request pool in @tags */ 3168 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3169 struct blk_mq_tags *tags) 3170 { 3171 struct page *page; 3172 unsigned long flags; 3173 3174 /* 3175 * There is no need to clear mapping if driver tags is not initialized 3176 * or the mapping belongs to the driver tags. 3177 */ 3178 if (!drv_tags || drv_tags == tags) 3179 return; 3180 3181 list_for_each_entry(page, &tags->page_list, lru) { 3182 unsigned long start = (unsigned long)page_address(page); 3183 unsigned long end = start + order_to_size(page->private); 3184 int i; 3185 3186 for (i = 0; i < drv_tags->nr_tags; i++) { 3187 struct request *rq = drv_tags->rqs[i]; 3188 unsigned long rq_addr = (unsigned long)rq; 3189 3190 if (rq_addr >= start && rq_addr < end) { 3191 WARN_ON_ONCE(req_ref_read(rq) != 0); 3192 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3193 } 3194 } 3195 } 3196 3197 /* 3198 * Wait until all pending iteration is done. 3199 * 3200 * Request reference is cleared and it is guaranteed to be observed 3201 * after the ->lock is released. 3202 */ 3203 spin_lock_irqsave(&drv_tags->lock, flags); 3204 spin_unlock_irqrestore(&drv_tags->lock, flags); 3205 } 3206 3207 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3208 unsigned int hctx_idx) 3209 { 3210 struct blk_mq_tags *drv_tags; 3211 struct page *page; 3212 3213 if (list_empty(&tags->page_list)) 3214 return; 3215 3216 if (blk_mq_is_shared_tags(set->flags)) 3217 drv_tags = set->shared_tags; 3218 else 3219 drv_tags = set->tags[hctx_idx]; 3220 3221 if (tags->static_rqs && set->ops->exit_request) { 3222 int i; 3223 3224 for (i = 0; i < tags->nr_tags; i++) { 3225 struct request *rq = tags->static_rqs[i]; 3226 3227 if (!rq) 3228 continue; 3229 set->ops->exit_request(set, rq, hctx_idx); 3230 tags->static_rqs[i] = NULL; 3231 } 3232 } 3233 3234 blk_mq_clear_rq_mapping(drv_tags, tags); 3235 3236 while (!list_empty(&tags->page_list)) { 3237 page = list_first_entry(&tags->page_list, struct page, lru); 3238 list_del_init(&page->lru); 3239 /* 3240 * Remove kmemleak object previously allocated in 3241 * blk_mq_alloc_rqs(). 
3242 */ 3243 kmemleak_free(page_address(page)); 3244 __free_pages(page, page->private); 3245 } 3246 } 3247 3248 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3249 { 3250 kfree(tags->rqs); 3251 tags->rqs = NULL; 3252 kfree(tags->static_rqs); 3253 tags->static_rqs = NULL; 3254 3255 blk_mq_free_tags(tags); 3256 } 3257 3258 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3259 unsigned int hctx_idx) 3260 { 3261 int i; 3262 3263 for (i = 0; i < set->nr_maps; i++) { 3264 unsigned int start = set->map[i].queue_offset; 3265 unsigned int end = start + set->map[i].nr_queues; 3266 3267 if (hctx_idx >= start && hctx_idx < end) 3268 break; 3269 } 3270 3271 if (i >= set->nr_maps) 3272 i = HCTX_TYPE_DEFAULT; 3273 3274 return i; 3275 } 3276 3277 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3278 unsigned int hctx_idx) 3279 { 3280 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3281 3282 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3283 } 3284 3285 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3286 unsigned int hctx_idx, 3287 unsigned int nr_tags, 3288 unsigned int reserved_tags) 3289 { 3290 int node = blk_mq_get_hctx_node(set, hctx_idx); 3291 struct blk_mq_tags *tags; 3292 3293 if (node == NUMA_NO_NODE) 3294 node = set->numa_node; 3295 3296 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 3297 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 3298 if (!tags) 3299 return NULL; 3300 3301 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3302 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3303 node); 3304 if (!tags->rqs) 3305 goto err_free_tags; 3306 3307 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3308 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3309 node); 3310 if (!tags->static_rqs) 3311 goto err_free_rqs; 3312 3313 return tags; 3314 3315 err_free_rqs: 3316 kfree(tags->rqs); 3317 err_free_tags: 3318 blk_mq_free_tags(tags); 3319 return NULL; 3320 } 3321 3322 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3323 unsigned int hctx_idx, int node) 3324 { 3325 int ret; 3326 3327 if (set->ops->init_request) { 3328 ret = set->ops->init_request(set, rq, hctx_idx, node); 3329 if (ret) 3330 return ret; 3331 } 3332 3333 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3334 return 0; 3335 } 3336 3337 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3338 struct blk_mq_tags *tags, 3339 unsigned int hctx_idx, unsigned int depth) 3340 { 3341 unsigned int i, j, entries_per_page, max_order = 4; 3342 int node = blk_mq_get_hctx_node(set, hctx_idx); 3343 size_t rq_size, left; 3344 3345 if (node == NUMA_NO_NODE) 3346 node = set->numa_node; 3347 3348 INIT_LIST_HEAD(&tags->page_list); 3349 3350 /* 3351 * rq_size is the size of the request plus driver payload, rounded 3352 * to the cacheline size 3353 */ 3354 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3355 cache_line_size()); 3356 left = rq_size * depth; 3357 3358 for (i = 0; i < depth; ) { 3359 int this_order = max_order; 3360 struct page *page; 3361 int to_do; 3362 void *p; 3363 3364 while (this_order && left < order_to_size(this_order - 1)) 3365 this_order--; 3366 3367 do { 3368 page = alloc_pages_node(node, 3369 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3370 this_order); 3371 if (page) 3372 break; 3373 if (!this_order--) 3374 break; 3375 if (order_to_size(this_order) < rq_size) 3376 break; 3377 } while (1); 3378 3379 if (!page) 3380 goto fail; 3381 3382 page->private = this_order; 3383 list_add_tail(&page->lru, 
&tags->page_list); 3384 3385 p = page_address(page); 3386 /* 3387 * Allow kmemleak to scan these pages as they contain pointers 3388 * to additional allocations, like those made via ops->init_request(). 3389 */ 3390 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3391 entries_per_page = order_to_size(this_order) / rq_size; 3392 to_do = min(entries_per_page, depth - i); 3393 left -= to_do * rq_size; 3394 for (j = 0; j < to_do; j++) { 3395 struct request *rq = p; 3396 3397 tags->static_rqs[i] = rq; 3398 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3399 tags->static_rqs[i] = NULL; 3400 goto fail; 3401 } 3402 3403 p += rq_size; 3404 i++; 3405 } 3406 } 3407 return 0; 3408 3409 fail: 3410 blk_mq_free_rqs(set, tags, hctx_idx); 3411 return -ENOMEM; 3412 } 3413 3414 struct rq_iter_data { 3415 struct blk_mq_hw_ctx *hctx; 3416 bool has_rq; 3417 }; 3418 3419 static bool blk_mq_has_request(struct request *rq, void *data) 3420 { 3421 struct rq_iter_data *iter_data = data; 3422 3423 if (rq->mq_hctx != iter_data->hctx) 3424 return true; 3425 iter_data->has_rq = true; 3426 return false; 3427 } 3428 3429 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3430 { 3431 struct blk_mq_tags *tags = hctx->sched_tags ? 3432 hctx->sched_tags : hctx->tags; 3433 struct rq_iter_data data = { 3434 .hctx = hctx, 3435 }; 3436 3437 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3438 return data.has_rq; 3439 } 3440 3441 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3442 struct blk_mq_hw_ctx *hctx) 3443 { 3444 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3445 return false; 3446 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3447 return false; 3448 return true; 3449 } 3450 3451 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3452 { 3453 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3454 struct blk_mq_hw_ctx, cpuhp_online); 3455 3456 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 3457 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 3458 return 0; 3459 3460 /* 3461 * Prevent new requests from being allocated on the current hctx. 3462 * 3463 * The smp_mb__after_atomic() pairs with the implied barrier in 3464 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag 3465 * is seen once we return from the tag allocator. 3466 */ 3467 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3468 smp_mb__after_atomic(); 3469 3470 /* 3471 * Try to grab a reference to the queue and wait for any outstanding 3472 * requests. If we could not grab a reference the queue has been 3473 * frozen and there are no requests. 3474 */ 3475 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3476 while (blk_mq_hctx_has_requests(hctx)) 3477 msleep(5); 3478 percpu_ref_put(&hctx->queue->q_usage_counter); 3479 } 3480 3481 return 0; 3482 } 3483 3484 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3485 { 3486 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3487 struct blk_mq_hw_ctx, cpuhp_online); 3488 3489 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3490 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3491 return 0; 3492 } 3493 3494 /* 3495 * 'cpu' is going away. Splice any existing rq_list entries from this 3496 * software queue to the hw queue dispatch list, and ensure that the 3497 * hw queue gets run.
3498 */ 3499 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) 3500 { 3501 struct blk_mq_hw_ctx *hctx; 3502 struct blk_mq_ctx *ctx; 3503 LIST_HEAD(tmp); 3504 enum hctx_type type; 3505 3506 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); 3507 if (!cpumask_test_cpu(cpu, hctx->cpumask)) 3508 return 0; 3509 3510 ctx = __blk_mq_get_ctx(hctx->queue, cpu); 3511 type = hctx->type; 3512 3513 spin_lock(&ctx->lock); 3514 if (!list_empty(&ctx->rq_lists[type])) { 3515 list_splice_init(&ctx->rq_lists[type], &tmp); 3516 blk_mq_hctx_clear_pending(hctx, ctx); 3517 } 3518 spin_unlock(&ctx->lock); 3519 3520 if (list_empty(&tmp)) 3521 return 0; 3522 3523 spin_lock(&hctx->lock); 3524 list_splice_tail_init(&tmp, &hctx->dispatch); 3525 spin_unlock(&hctx->lock); 3526 3527 blk_mq_run_hw_queue(hctx, true); 3528 return 0; 3529 } 3530 3531 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) 3532 { 3533 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3534 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3535 &hctx->cpuhp_online); 3536 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, 3537 &hctx->cpuhp_dead); 3538 } 3539 3540 /* 3541 * Before freeing the hw queue, clear the flush request reference in 3542 * tags->rqs[] to avoid a potential use-after-free. 3543 */ 3544 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, 3545 unsigned int queue_depth, struct request *flush_rq) 3546 { 3547 int i; 3548 unsigned long flags; 3549 3550 /* The hw queue may not be mapped yet */ 3551 if (!tags) 3552 return; 3553 3554 WARN_ON_ONCE(req_ref_read(flush_rq) != 0); 3555 3556 for (i = 0; i < queue_depth; i++) 3557 cmpxchg(&tags->rqs[i], flush_rq, NULL); 3558 3559 /* 3560 * Wait until all pending iterations are done. 3561 * 3562 * The request reference has been cleared and is guaranteed to be 3563 * observed after the ->lock is released.
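 *
 * The empty lock/unlock pair below is deliberate: acquiring ->lock after
 * every iterator has dropped it guarantees that no iterator that might
 * still dereference the old rq pointer is running concurrently.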
3564 */ 3565 spin_lock_irqsave(&tags->lock, flags); 3566 spin_unlock_irqrestore(&tags->lock, flags); 3567 } 3568 3569 /* hctx->ctxs will be freed in queue's release handler */ 3570 static void blk_mq_exit_hctx(struct request_queue *q, 3571 struct blk_mq_tag_set *set, 3572 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3573 { 3574 struct request *flush_rq = hctx->fq->flush_rq; 3575 3576 if (blk_mq_hw_queue_mapped(hctx)) 3577 blk_mq_tag_idle(hctx); 3578 3579 if (blk_queue_init_done(q)) 3580 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3581 set->queue_depth, flush_rq); 3582 if (set->ops->exit_request) 3583 set->ops->exit_request(set, flush_rq, hctx_idx); 3584 3585 if (set->ops->exit_hctx) 3586 set->ops->exit_hctx(hctx, hctx_idx); 3587 3588 blk_mq_remove_cpuhp(hctx); 3589 3590 xa_erase(&q->hctx_table, hctx_idx); 3591 3592 spin_lock(&q->unused_hctx_lock); 3593 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3594 spin_unlock(&q->unused_hctx_lock); 3595 } 3596 3597 static void blk_mq_exit_hw_queues(struct request_queue *q, 3598 struct blk_mq_tag_set *set, int nr_queue) 3599 { 3600 struct blk_mq_hw_ctx *hctx; 3601 unsigned long i; 3602 3603 queue_for_each_hw_ctx(q, hctx, i) { 3604 if (i == nr_queue) 3605 break; 3606 blk_mq_exit_hctx(q, set, hctx, i); 3607 } 3608 } 3609 3610 static int blk_mq_init_hctx(struct request_queue *q, 3611 struct blk_mq_tag_set *set, 3612 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3613 { 3614 hctx->queue_num = hctx_idx; 3615 3616 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3617 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3618 &hctx->cpuhp_online); 3619 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3620 3621 hctx->tags = set->tags[hctx_idx]; 3622 3623 if (set->ops->init_hctx && 3624 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3625 goto unregister_cpu_notifier; 3626 3627 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3628 hctx->numa_node)) 3629 goto exit_hctx; 3630 3631 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3632 goto exit_flush_rq; 3633 3634 return 0; 3635 3636 exit_flush_rq: 3637 if (set->ops->exit_request) 3638 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3639 exit_hctx: 3640 if (set->ops->exit_hctx) 3641 set->ops->exit_hctx(hctx, hctx_idx); 3642 unregister_cpu_notifier: 3643 blk_mq_remove_cpuhp(hctx); 3644 return -1; 3645 } 3646 3647 static struct blk_mq_hw_ctx * 3648 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3649 int node) 3650 { 3651 struct blk_mq_hw_ctx *hctx; 3652 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3653 3654 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3655 if (!hctx) 3656 goto fail_alloc_hctx; 3657 3658 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3659 goto free_hctx; 3660 3661 atomic_set(&hctx->nr_active, 0); 3662 if (node == NUMA_NO_NODE) 3663 node = set->numa_node; 3664 hctx->numa_node = node; 3665 3666 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3667 spin_lock_init(&hctx->lock); 3668 INIT_LIST_HEAD(&hctx->dispatch); 3669 hctx->queue = q; 3670 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3671 3672 INIT_LIST_HEAD(&hctx->hctx_list); 3673 3674 /* 3675 * Allocate space for all possible cpus to avoid allocation at 3676 * runtime 3677 */ 3678 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3679 gfp, node); 3680 if (!hctx->ctxs) 3681 goto free_cpumask; 3682 3683 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3684 gfp, node, false, false)) 3685 
goto free_ctxs; 3686 hctx->nr_ctx = 0; 3687 3688 spin_lock_init(&hctx->dispatch_wait_lock); 3689 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3690 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3691 3692 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3693 if (!hctx->fq) 3694 goto free_bitmap; 3695 3696 blk_mq_hctx_kobj_init(hctx); 3697 3698 return hctx; 3699 3700 free_bitmap: 3701 sbitmap_free(&hctx->ctx_map); 3702 free_ctxs: 3703 kfree(hctx->ctxs); 3704 free_cpumask: 3705 free_cpumask_var(hctx->cpumask); 3706 free_hctx: 3707 kfree(hctx); 3708 fail_alloc_hctx: 3709 return NULL; 3710 } 3711 3712 static void blk_mq_init_cpu_queues(struct request_queue *q, 3713 unsigned int nr_hw_queues) 3714 { 3715 struct blk_mq_tag_set *set = q->tag_set; 3716 unsigned int i, j; 3717 3718 for_each_possible_cpu(i) { 3719 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3720 struct blk_mq_hw_ctx *hctx; 3721 int k; 3722 3723 __ctx->cpu = i; 3724 spin_lock_init(&__ctx->lock); 3725 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3726 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3727 3728 __ctx->queue = q; 3729 3730 /* 3731 * Set local node, IFF we have more than one hw queue. If 3732 * not, we remain on the home node of the device 3733 */ 3734 for (j = 0; j < set->nr_maps; j++) { 3735 hctx = blk_mq_map_queue_type(q, j, i); 3736 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3737 hctx->numa_node = cpu_to_node(i); 3738 } 3739 } 3740 } 3741 3742 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3743 unsigned int hctx_idx, 3744 unsigned int depth) 3745 { 3746 struct blk_mq_tags *tags; 3747 int ret; 3748 3749 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3750 if (!tags) 3751 return NULL; 3752 3753 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3754 if (ret) { 3755 blk_mq_free_rq_map(tags); 3756 return NULL; 3757 } 3758 3759 return tags; 3760 } 3761 3762 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3763 int hctx_idx) 3764 { 3765 if (blk_mq_is_shared_tags(set->flags)) { 3766 set->tags[hctx_idx] = set->shared_tags; 3767 3768 return true; 3769 } 3770 3771 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3772 set->queue_depth); 3773 3774 return set->tags[hctx_idx]; 3775 } 3776 3777 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3778 struct blk_mq_tags *tags, 3779 unsigned int hctx_idx) 3780 { 3781 if (tags) { 3782 blk_mq_free_rqs(set, tags, hctx_idx); 3783 blk_mq_free_rq_map(tags); 3784 } 3785 } 3786 3787 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3788 unsigned int hctx_idx) 3789 { 3790 if (!blk_mq_is_shared_tags(set->flags)) 3791 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3792 3793 set->tags[hctx_idx] = NULL; 3794 } 3795 3796 static void blk_mq_map_swqueue(struct request_queue *q) 3797 { 3798 unsigned int j, hctx_idx; 3799 unsigned long i; 3800 struct blk_mq_hw_ctx *hctx; 3801 struct blk_mq_ctx *ctx; 3802 struct blk_mq_tag_set *set = q->tag_set; 3803 3804 queue_for_each_hw_ctx(q, hctx, i) { 3805 cpumask_clear(hctx->cpumask); 3806 hctx->nr_ctx = 0; 3807 hctx->dispatch_from = NULL; 3808 } 3809 3810 /* 3811 * Map software to hardware queues. 3812 * 3813 * If the cpu isn't present, the cpu is mapped to first hctx. 
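 *
 * As a concrete (hypothetical) example: with 8 possible CPUs and two hw
 * queues in a map, mq_map[] may assign CPUs 0-3 to hctx 0 and CPUs 4-7
 * to hctx 1; each ctx then records, in ctx->index_hw[type], its slot in
 * the owning hctx->ctxs[] array.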
*/ 3815 for_each_possible_cpu(i) { 3816 3817 ctx = per_cpu_ptr(q->queue_ctx, i); 3818 for (j = 0; j < set->nr_maps; j++) { 3819 if (!set->map[j].nr_queues) { 3820 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3821 HCTX_TYPE_DEFAULT, i); 3822 continue; 3823 } 3824 hctx_idx = set->map[j].mq_map[i]; 3825 /* unmapped hw queue can be remapped after CPU topo changed */ 3826 if (!set->tags[hctx_idx] && 3827 !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { 3828 /* 3829 * If tags initialization fails for some hctx, 3830 * that hctx won't be brought online. In this 3831 * case, remap the current ctx to hctx[0], which 3832 * is guaranteed to always have tags allocated 3833 */ 3834 set->map[j].mq_map[i] = 0; 3835 } 3836 3837 hctx = blk_mq_map_queue_type(q, j, i); 3838 ctx->hctxs[j] = hctx; 3839 /* 3840 * If the CPU is already set in the mask, then we've 3841 * mapped this one already. This can happen if 3842 * devices share queues across queue maps. 3843 */ 3844 if (cpumask_test_cpu(i, hctx->cpumask)) 3845 continue; 3846 3847 cpumask_set_cpu(i, hctx->cpumask); 3848 hctx->type = j; 3849 ctx->index_hw[hctx->type] = hctx->nr_ctx; 3850 hctx->ctxs[hctx->nr_ctx++] = ctx; 3851 3852 /* 3853 * If the nr_ctx type overflows, we have exceeded the 3854 * number of sw queues we can support. 3855 */ 3856 BUG_ON(!hctx->nr_ctx); 3857 } 3858 3859 for (; j < HCTX_MAX_TYPES; j++) 3860 ctx->hctxs[j] = blk_mq_map_queue_type(q, 3861 HCTX_TYPE_DEFAULT, i); 3862 } 3863 3864 queue_for_each_hw_ctx(q, hctx, i) { 3865 /* 3866 * If no software queues are mapped to this hardware queue, 3867 * disable it and free the request entries. 3868 */ 3869 if (!hctx->nr_ctx) { 3870 /* Never unmap queue 0. We need it as a 3871 * fallback in case a new remap fails 3872 * allocation 3873 */ 3874 if (i) 3875 __blk_mq_free_map_and_rqs(set, i); 3876 3877 hctx->tags = NULL; 3878 continue; 3879 } 3880 3881 hctx->tags = set->tags[i]; 3882 WARN_ON(!hctx->tags); 3883 3884 /* 3885 * Set the map size to the number of mapped software queues. 3886 * This is more accurate and more efficient than looping 3887 * over all possibly mapped software queues. 3888 */ 3889 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); 3890 3891 /* 3892 * Initialize batch round-robin counts 3893 */ 3894 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); 3895 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 3896 } 3897 } 3898 3899 /* 3900 * Caller needs to ensure that we're either frozen/quiesced, or that 3901 * the queue isn't live yet.
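 *
 * blk_mq_update_tag_set_shared() below is the canonical caller and shows
 * the expected pattern:
 *
 *	blk_mq_freeze_queue(q);
 *	queue_set_hctx_shared(q, shared);
 *	blk_mq_unfreeze_queue(q);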
3902 */ 3903 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3904 { 3905 struct blk_mq_hw_ctx *hctx; 3906 unsigned long i; 3907 3908 queue_for_each_hw_ctx(q, hctx, i) { 3909 if (shared) { 3910 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3911 } else { 3912 blk_mq_tag_idle(hctx); 3913 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3914 } 3915 } 3916 } 3917 3918 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3919 bool shared) 3920 { 3921 struct request_queue *q; 3922 3923 lockdep_assert_held(&set->tag_list_lock); 3924 3925 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3926 blk_mq_freeze_queue(q); 3927 queue_set_hctx_shared(q, shared); 3928 blk_mq_unfreeze_queue(q); 3929 } 3930 } 3931 3932 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3933 { 3934 struct blk_mq_tag_set *set = q->tag_set; 3935 3936 mutex_lock(&set->tag_list_lock); 3937 list_del(&q->tag_set_list); 3938 if (list_is_singular(&set->tag_list)) { 3939 /* just transitioned to unshared */ 3940 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3941 /* update existing queue */ 3942 blk_mq_update_tag_set_shared(set, false); 3943 } 3944 mutex_unlock(&set->tag_list_lock); 3945 INIT_LIST_HEAD(&q->tag_set_list); 3946 } 3947 3948 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3949 struct request_queue *q) 3950 { 3951 mutex_lock(&set->tag_list_lock); 3952 3953 /* 3954 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3955 */ 3956 if (!list_empty(&set->tag_list) && 3957 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3958 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3959 /* update existing queue */ 3960 blk_mq_update_tag_set_shared(set, true); 3961 } 3962 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3963 queue_set_hctx_shared(q, true); 3964 list_add_tail(&q->tag_set_list, &set->tag_list); 3965 3966 mutex_unlock(&set->tag_list_lock); 3967 } 3968 3969 /* All allocations will be freed in release handler of q->mq_kobj */ 3970 static int blk_mq_alloc_ctxs(struct request_queue *q) 3971 { 3972 struct blk_mq_ctxs *ctxs; 3973 int cpu; 3974 3975 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 3976 if (!ctxs) 3977 return -ENOMEM; 3978 3979 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 3980 if (!ctxs->queue_ctx) 3981 goto fail; 3982 3983 for_each_possible_cpu(cpu) { 3984 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 3985 ctx->ctxs = ctxs; 3986 } 3987 3988 q->mq_kobj = &ctxs->kobj; 3989 q->queue_ctx = ctxs->queue_ctx; 3990 3991 return 0; 3992 fail: 3993 kfree(ctxs); 3994 return -ENOMEM; 3995 } 3996 3997 /* 3998 * It is the actual release handler for mq, but we do it from 3999 * request queue's release handler for avoiding use-after-free 4000 * and headache because q->mq_kobj shouldn't have been introduced, 4001 * but we can't group ctx/kctx kobj without it. 4002 */ 4003 void blk_mq_release(struct request_queue *q) 4004 { 4005 struct blk_mq_hw_ctx *hctx, *next; 4006 unsigned long i; 4007 4008 queue_for_each_hw_ctx(q, hctx, i) 4009 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 4010 4011 /* all hctx are in .unused_hctx_list now */ 4012 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 4013 list_del_init(&hctx->hctx_list); 4014 kobject_put(&hctx->kobj); 4015 } 4016 4017 xa_destroy(&q->hctx_table); 4018 4019 /* 4020 * release .mq_kobj and sw queue's kobject now because 4021 * both share lifetime with request queue. 
4022 	 */
4023 	blk_mq_sysfs_deinit(q);
4024 }
4025 
4026 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
4027 		void *queuedata)
4028 {
4029 	struct request_queue *q;
4030 	int ret;
4031 
4032 	q = blk_alloc_queue(set->numa_node);
4033 	if (!q)
4034 		return ERR_PTR(-ENOMEM);
4035 	q->queuedata = queuedata;
4036 	ret = blk_mq_init_allocated_queue(set, q);
4037 	if (ret) {
4038 		blk_put_queue(q);
4039 		return ERR_PTR(ret);
4040 	}
4041 	return q;
4042 }
4043 
4044 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
4045 {
4046 	return blk_mq_init_queue_data(set, NULL);
4047 }
4048 EXPORT_SYMBOL(blk_mq_init_queue);
4049 
4050 /**
4051  * blk_mq_destroy_queue - shutdown a request queue
4052  * @q: request queue to shutdown
4053  *
4054  * This shuts down a request queue allocated by blk_mq_init_queue(). All future
4055  * requests will be failed with -ENODEV. The caller is responsible for dropping
4056  * the reference from blk_mq_init_queue() by calling blk_put_queue().
4057  *
4058  * Context: can sleep
4059  */
4060 void blk_mq_destroy_queue(struct request_queue *q)
4061 {
4062 	WARN_ON_ONCE(!queue_is_mq(q));
4063 	WARN_ON_ONCE(blk_queue_registered(q));
4064 
4065 	might_sleep();
4066 
4067 	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4068 	blk_queue_start_drain(q);
4069 	blk_mq_freeze_queue_wait(q);
4070 
4071 	blk_sync_queue(q);
4072 	blk_mq_cancel_work_sync(q);
4073 	blk_mq_exit_queue(q);
4074 }
4075 EXPORT_SYMBOL(blk_mq_destroy_queue);
4076 
4077 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4078 		struct lock_class_key *lkclass)
4079 {
4080 	struct request_queue *q;
4081 	struct gendisk *disk;
4082 
4083 	q = blk_mq_init_queue_data(set, queuedata);
4084 	if (IS_ERR(q))
4085 		return ERR_CAST(q);
4086 
4087 	disk = __alloc_disk_node(q, set->numa_node, lkclass);
4088 	if (!disk) {
4089 		blk_mq_destroy_queue(q);
4090 		blk_put_queue(q);
4091 		return ERR_PTR(-ENOMEM);
4092 	}
4093 	set_bit(GD_OWNS_QUEUE, &disk->state);
4094 	return disk;
4095 }
4096 EXPORT_SYMBOL(__blk_mq_alloc_disk);
4097 
4098 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4099 		struct lock_class_key *lkclass)
4100 {
4101 	struct gendisk *disk;
4102 
4103 	if (!blk_get_queue(q))
4104 		return NULL;
4105 	disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4106 	if (!disk)
4107 		blk_put_queue(q);
4108 	return disk;
4109 }
4110 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4111 
4112 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4113 		struct blk_mq_tag_set *set, struct request_queue *q,
4114 		int hctx_idx, int node)
4115 {
4116 	struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4117 
4118 	/* reuse dead hctx first */
4119 	spin_lock(&q->unused_hctx_lock);
4120 	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4121 		if (tmp->numa_node == node) {
4122 			hctx = tmp;
4123 			break;
4124 		}
4125 	}
4126 	if (hctx)
4127 		list_del_init(&hctx->hctx_list);
4128 	spin_unlock(&q->unused_hctx_lock);
4129 
4130 	if (!hctx)
4131 		hctx = blk_mq_alloc_hctx(q, set, node);
4132 	if (!hctx)
4133 		goto fail;
4134 
4135 	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4136 		goto free_hctx;
4137 
4138 	return hctx;
4139 
4140 free_hctx:
4141 	kobject_put(&hctx->kobj);
4142 fail:
4143 	return NULL;
4144 }
4145 
4146 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4147 				   struct request_queue *q)
4148 {
4149 	struct blk_mq_hw_ctx *hctx;
4150 	unsigned long i, j;
4151 
4152 	/* protect against switching io scheduler */
4153 	mutex_lock(&q->sysfs_lock);
4154 	for (i = 0; i < set->nr_hw_queues; i++) {
4155 		int old_node;
4156 		int node =
blk_mq_get_hctx_node(set, i);
4157 		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4158 
4159 		if (old_hctx) {
4160 			old_node = old_hctx->numa_node;
4161 			blk_mq_exit_hctx(q, set, old_hctx, i);
4162 		}
4163 
4164 		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4165 			if (!old_hctx)
4166 				break;
4167 			pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
4168 				node, old_node);
4169 			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4170 			WARN_ON_ONCE(!hctx);
4171 		}
4172 	}
4173 	/*
4174 	 * If increasing nr_hw_queues failed, free the newly allocated
4175 	 * hctxs and keep the previous q->nr_hw_queues.
4176 	 */
4177 	if (i != set->nr_hw_queues) {
4178 		j = q->nr_hw_queues;
4179 	} else {
4180 		j = i;
4181 		q->nr_hw_queues = set->nr_hw_queues;
4182 	}
4183 
4184 	xa_for_each_start(&q->hctx_table, j, hctx, j)
4185 		blk_mq_exit_hctx(q, set, hctx, j);
4186 	mutex_unlock(&q->sysfs_lock);
4187 }
4188 
4189 static void blk_mq_update_poll_flag(struct request_queue *q)
4190 {
4191 	struct blk_mq_tag_set *set = q->tag_set;
4192 
4193 	if (set->nr_maps > HCTX_TYPE_POLL &&
4194 	    set->map[HCTX_TYPE_POLL].nr_queues)
4195 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4196 	else
4197 		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4198 }
4199 
4200 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4201 		struct request_queue *q)
4202 {
4203 	/* mark the queue as mq asap */
4204 	q->mq_ops = set->ops;
4205 
4206 	if (blk_mq_alloc_ctxs(q))
4207 		goto err_exit;
4208 
4209 	/* init q->mq_kobj and sw queues' kobjects */
4210 	blk_mq_sysfs_init(q);
4211 
4212 	INIT_LIST_HEAD(&q->unused_hctx_list);
4213 	spin_lock_init(&q->unused_hctx_lock);
4214 
4215 	xa_init(&q->hctx_table);
4216 
4217 	blk_mq_realloc_hw_ctxs(set, q);
4218 	if (!q->nr_hw_queues)
4219 		goto err_hctxs;
4220 
4221 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4222 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4223 
4224 	q->tag_set = set;
4225 
4226 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4227 	blk_mq_update_poll_flag(q);
4228 
4229 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4230 	INIT_LIST_HEAD(&q->flush_list);
4231 	INIT_LIST_HEAD(&q->requeue_list);
4232 	spin_lock_init(&q->requeue_lock);
4233 
4234 	q->nr_requests = set->queue_depth;
4235 
4236 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4237 	blk_mq_add_queue_tag_set(set, q);
4238 	blk_mq_map_swqueue(q);
4239 	return 0;
4240 
4241 err_hctxs:
4242 	blk_mq_release(q);
4243 err_exit:
4244 	q->mq_ops = NULL;
4245 	return -ENOMEM;
4246 }
4247 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4248 
4249 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4250 void blk_mq_exit_queue(struct request_queue *q)
4251 {
4252 	struct blk_mq_tag_set *set = q->tag_set;
4253 
4254 	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4255 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4256 	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags.
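	 * (The flag is cleared when removing this queue leaves only one
	 * queue on the tag_list; see blk_mq_del_queue_tag_set().)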
	 */
4257 	blk_mq_del_queue_tag_set(q);
4258 }
4259 
4260 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4261 {
4262 	int i;
4263 
4264 	if (blk_mq_is_shared_tags(set->flags)) {
4265 		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4266 						BLK_MQ_NO_HCTX_IDX,
4267 						set->queue_depth);
4268 		if (!set->shared_tags)
4269 			return -ENOMEM;
4270 	}
4271 
4272 	for (i = 0; i < set->nr_hw_queues; i++) {
4273 		if (!__blk_mq_alloc_map_and_rqs(set, i))
4274 			goto out_unwind;
4275 		cond_resched();
4276 	}
4277 
4278 	return 0;
4279 
4280 out_unwind:
4281 	while (--i >= 0)
4282 		__blk_mq_free_map_and_rqs(set, i);
4283 
4284 	if (blk_mq_is_shared_tags(set->flags)) {
4285 		blk_mq_free_map_and_rqs(set, set->shared_tags,
4286 					BLK_MQ_NO_HCTX_IDX);
4287 	}
4288 
4289 	return -ENOMEM;
4290 }
4291 
4292 /*
4293  * Allocate the request maps associated with this tag_set. Note that this
4294  * may reduce the depth asked for, if memory is tight. set->queue_depth
4295  * will be updated to reflect the allocated depth.
4296  */
4297 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4298 {
4299 	unsigned int depth;
4300 	int err;
4301 
4302 	depth = set->queue_depth;
4303 	do {
4304 		err = __blk_mq_alloc_rq_maps(set);
4305 		if (!err)
4306 			break;
4307 
4308 		set->queue_depth >>= 1;
4309 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4310 			err = -ENOMEM;
4311 			break;
4312 		}
4313 	} while (set->queue_depth);
4314 
4315 	if (!set->queue_depth || err) {
4316 		pr_err("blk-mq: failed to allocate request map\n");
4317 		return -ENOMEM;
4318 	}
4319 
4320 	if (depth != set->queue_depth)
4321 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4322 			depth, set->queue_depth);
4323 
4324 	return 0;
4325 }
4326 
4327 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4328 {
4329 	/*
4330 	 * blk_mq_map_queues() and multiple .map_queues() implementations
4331 	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4332 	 * number of hardware queues.
4333 	 */
4334 	if (set->nr_maps == 1)
4335 		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4336 
4337 	if (set->ops->map_queues && !is_kdump_kernel()) {
4338 		int i;
4339 
4340 		/*
4341 		 * A transport's .map_queues callback usually works in the
4342 		 * following way:
4343 		 *
4344 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4345 		 *	mask = get_cpu_mask(queue)
4346 		 *	for_each_cpu(cpu, mask)
4347 		 *		set->map[x].mq_map[cpu] = queue;
4348 		 * }
4349 		 *
4350 		 * When we need to remap, the table has to be cleared first
4351 		 * to kill stale mappings, since a CPU may end up not mapped
4352 		 * to any hw queue.
4353 		 */
4354 		for (i = 0; i < set->nr_maps; i++)
4355 			blk_mq_clear_mq_map(&set->map[i]);
4356 
4357 		set->ops->map_queues(set);
4358 	} else {
4359 		BUG_ON(set->nr_maps > 1);
4360 		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4361 	}
4362 }
4363 
4364 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4365 				       int new_nr_hw_queues)
4366 {
4367 	struct blk_mq_tags **new_tags;
4368 
4369 	if (set->nr_hw_queues >= new_nr_hw_queues)
4370 		goto done;
4371 
4372 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4373 				GFP_KERNEL, set->numa_node);
4374 	if (!new_tags)
4375 		return -ENOMEM;
4376 
4377 	if (set->tags)
4378 		memcpy(new_tags, set->tags, set->nr_hw_queues *
4379 		       sizeof(*set->tags));
4380 	kfree(set->tags);
4381 	set->tags = new_tags;
4382 done:
4383 	set->nr_hw_queues = new_nr_hw_queues;
4384 	return 0;
4385 }
4386 
4387 /*
4388  * Alloc a tag set to be associated with one or more request queues.
4389  * May fail with EINVAL for various error conditions. May adjust the
4390  * requested depth down, if it's too large. In that case, the set
4391  * value will be stored in set->queue_depth.
4392  */
4393 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4394 {
4395 	int i, ret;
4396 
4397 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4398 
4399 	if (!set->nr_hw_queues)
4400 		return -EINVAL;
4401 	if (!set->queue_depth)
4402 		return -EINVAL;
4403 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4404 		return -EINVAL;
4405 
4406 	if (!set->ops->queue_rq)
4407 		return -EINVAL;
4408 
4409 	if (!set->ops->get_budget ^ !set->ops->put_budget)
4410 		return -EINVAL;
4411 
4412 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4413 		pr_info("blk-mq: reduced tag depth to %u\n",
4414 			BLK_MQ_MAX_DEPTH);
4415 		set->queue_depth = BLK_MQ_MAX_DEPTH;
4416 	}
4417 
4418 	if (!set->nr_maps)
4419 		set->nr_maps = 1;
4420 	else if (set->nr_maps > HCTX_MAX_TYPES)
4421 		return -EINVAL;
4422 
4423 	/*
4424 	 * If a crashdump is active, then we are potentially in a very
4425 	 * memory constrained environment. Limit us to 1 queue and
4426 	 * 64 tags to prevent using too much memory.
4427 	 */
4428 	if (is_kdump_kernel()) {
4429 		set->nr_hw_queues = 1;
4430 		set->nr_maps = 1;
4431 		set->queue_depth = min(64U, set->queue_depth);
4432 	}
4433 	/*
4434 	 * There is no use in having more h/w queues than CPUs if we just
4435 	 * have a single map.
4436 	 */
4437 	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4438 		set->nr_hw_queues = nr_cpu_ids;
4439 
4440 	if (set->flags & BLK_MQ_F_BLOCKING) {
4441 		set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
4442 		if (!set->srcu)
4443 			return -ENOMEM;
4444 		ret = init_srcu_struct(set->srcu);
4445 		if (ret)
4446 			goto out_free_srcu;
4447 	}
4448 
4449 	ret = -ENOMEM;
4450 	set->tags = kcalloc_node(set->nr_hw_queues,
4451 				 sizeof(struct blk_mq_tags *), GFP_KERNEL,
4452 				 set->numa_node);
4453 	if (!set->tags)
4454 		goto out_cleanup_srcu;
4455 
4456 	for (i = 0; i < set->nr_maps; i++) {
4457 		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4458 						  sizeof(set->map[i].mq_map[0]),
4459 						  GFP_KERNEL, set->numa_node);
4460 		if (!set->map[i].mq_map)
4461 			goto out_free_mq_map;
4462 		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4463 	}
4464 
4465 	blk_mq_update_queue_map(set);
4466 
4467 	ret = blk_mq_alloc_set_map_and_rqs(set);
4468 	if (ret)
4469 		goto out_free_mq_map;
4470 
4471 	mutex_init(&set->tag_list_lock);
4472 	INIT_LIST_HEAD(&set->tag_list);
4473 
4474 	return 0;
4475 
4476 out_free_mq_map:
4477 	for (i = 0; i < set->nr_maps; i++) {
4478 		kfree(set->map[i].mq_map);
4479 		set->map[i].mq_map = NULL;
4480 	}
4481 	kfree(set->tags);
4482 	set->tags = NULL;
4483 out_cleanup_srcu:
4484 	if (set->flags & BLK_MQ_F_BLOCKING)
4485 		cleanup_srcu_struct(set->srcu);
4486 out_free_srcu:
4487 	if (set->flags & BLK_MQ_F_BLOCKING)
4488 		kfree(set->srcu);
4489 	return ret;
4490 }
4491 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
4492 
4493 /* allocate and initialize a tagset for a simple single-queue device */
4494 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4495 		const struct blk_mq_ops *ops, unsigned int queue_depth,
4496 		unsigned int set_flags)
4497 {
4498 	memset(set, 0, sizeof(*set));
4499 	set->ops = ops;
4500 	set->nr_hw_queues = 1;
4501 	set->nr_maps = 1;
4502 	set->queue_depth = queue_depth;
4503 	set->numa_node = NUMA_NO_NODE;
4504 	set->flags = set_flags;
4505 	return blk_mq_alloc_tag_set(set);
4506 }
4507 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
4508 
4509 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4510 {
4511 	int i, j;
4512 
4513 	for (i = 0; i < set->nr_hw_queues; i++)
4514 		__blk_mq_free_map_and_rqs(set, i);
4515 
4516 	if (blk_mq_is_shared_tags(set->flags)) {
4517 		blk_mq_free_map_and_rqs(set, set->shared_tags,
4518 					BLK_MQ_NO_HCTX_IDX);
4519 	}
4520 
4521 	for (j = 0; j < set->nr_maps; j++) {
4522 		kfree(set->map[j].mq_map);
4523 		set->map[j].mq_map = NULL;
4524 	}
4525 
4526 	kfree(set->tags);
4527 	set->tags = NULL;
4528 	if (set->flags & BLK_MQ_F_BLOCKING) {
4529 		cleanup_srcu_struct(set->srcu);
4530 		kfree(set->srcu);
4531 	}
4532 }
4533 EXPORT_SYMBOL(blk_mq_free_tag_set);
4534 
4535 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4536 {
4537 	struct blk_mq_tag_set *set = q->tag_set;
4538 	struct blk_mq_hw_ctx *hctx;
4539 	int ret;
4540 	unsigned long i;
4541 
4542 	if (!set)
4543 		return -EINVAL;
4544 
4545 	if (q->nr_requests == nr)
4546 		return 0;
4547 
4548 	blk_mq_freeze_queue(q);
4549 	blk_mq_quiesce_queue(q);
4550 
4551 	ret = 0;
4552 	queue_for_each_hw_ctx(q, hctx, i) {
4553 		if (!hctx->tags)
4554 			continue;
4555 		/*
4556 		 * If we're using an MQ scheduler, just update the scheduler
4557 		 * queue depth. This is similar to what the old code would do.
4558 		 */
4559 		if (hctx->sched_tags) {
4560 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4561 						      nr, true);
4562 		} else {
4563 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4564 						      false);
4565 		}
4566 		if (ret)
4567 			break;
4568 		if (q->elevator && q->elevator->type->ops.depth_updated)
4569 			q->elevator->type->ops.depth_updated(hctx);
4570 	}
4571 	if (!ret) {
4572 		q->nr_requests = nr;
4573 		if (blk_mq_is_shared_tags(set->flags)) {
4574 			if (q->elevator)
4575 				blk_mq_tag_update_sched_shared_tags(q);
4576 			else
4577 				blk_mq_tag_resize_shared_tags(set, nr);
4578 		}
4579 	}
4580 
4581 	blk_mq_unquiesce_queue(q);
4582 	blk_mq_unfreeze_queue(q);
4583 
4584 	return ret;
4585 }
4586 
4587 /*
4588  * A request_queue and elevator_type pair. It is only used by
4589  * __blk_mq_update_nr_hw_queues() to cache
4590  * the elevator_type associated with a request_queue.
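 *
 * The pairs are collected by blk_mq_elv_switch_none(), which also switches
 * each queue to the 'none' scheduler, and are consumed again by
 * blk_mq_elv_switch_back() once the new queue mapping is in place.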
4591  */
4592 struct blk_mq_qe_pair {
4593 	struct list_head node;
4594 	struct request_queue *q;
4595 	struct elevator_type *type;
4596 };
4597 
4598 /*
4599  * Cache the elevator_type in the qe pair list and switch the
4600  * IO scheduler to 'none'.
4601  */
4602 static bool blk_mq_elv_switch_none(struct list_head *head,
4603 		struct request_queue *q)
4604 {
4605 	struct blk_mq_qe_pair *qe;
4606 
4607 	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4608 	if (!qe)
4609 		return false;
4610 
4611 	/* q->elevator needs protection from ->sysfs_lock */
4612 	mutex_lock(&q->sysfs_lock);
4613 
4614 	/* the check has to be done while holding sysfs_lock */
4615 	if (!q->elevator) {
4616 		kfree(qe);
4617 		goto unlock;
4618 	}
4619 
4620 	INIT_LIST_HEAD(&qe->node);
4621 	qe->q = q;
4622 	qe->type = q->elevator->type;
4623 	/* keep a reference to the elevator module as we'll switch back */
4624 	__elevator_get(qe->type);
4625 	list_add(&qe->node, head);
4626 	elevator_disable(q);
4627 unlock:
4628 	mutex_unlock(&q->sysfs_lock);
4629 
4630 	return true;
4631 }
4632 
4633 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4634 		struct request_queue *q)
4635 {
4636 	struct blk_mq_qe_pair *qe;
4637 
4638 	list_for_each_entry(qe, head, node)
4639 		if (qe->q == q)
4640 			return qe;
4641 
4642 	return NULL;
4643 }
4644 
4645 static void blk_mq_elv_switch_back(struct list_head *head,
4646 		struct request_queue *q)
4647 {
4648 	struct blk_mq_qe_pair *qe;
4649 	struct elevator_type *t;
4650 
4651 	qe = blk_lookup_qe_pair(head, q);
4652 	if (!qe)
4653 		return;
4654 	t = qe->type;
4655 	list_del(&qe->node);
4656 	kfree(qe);
4657 
4658 	mutex_lock(&q->sysfs_lock);
4659 	elevator_switch(q, t);
4660 	/* drop the reference acquired in blk_mq_elv_switch_none */
4661 	elevator_put(t);
4662 	mutex_unlock(&q->sysfs_lock);
4663 }
4664 
4665 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4666 							int nr_hw_queues)
4667 {
4668 	struct request_queue *q;
4669 	LIST_HEAD(head);
4670 	int prev_nr_hw_queues;
4671 
4672 	lockdep_assert_held(&set->tag_list_lock);
4673 
4674 	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4675 		nr_hw_queues = nr_cpu_ids;
4676 	if (nr_hw_queues < 1)
4677 		return;
4678 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4679 		return;
4680 
4681 	list_for_each_entry(q, &set->tag_list, tag_set_list)
4682 		blk_mq_freeze_queue(q);
4683 	/*
4684 	 * Switch IO scheduler to 'none', cleaning up the data associated
4685 	 * with the previous scheduler. We will switch back once we are done
4686 	 * updating the new sw to hw queue mappings.
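	 *
	 * Drivers typically enter this path via blk_mq_update_nr_hw_queues()
	 * from a reset or reconfiguration handler, e.g. after the number of
	 * usable interrupt vectors has changed.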
4687 */ 4688 list_for_each_entry(q, &set->tag_list, tag_set_list) 4689 if (!blk_mq_elv_switch_none(&head, q)) 4690 goto switch_back; 4691 4692 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4693 blk_mq_debugfs_unregister_hctxs(q); 4694 blk_mq_sysfs_unregister_hctxs(q); 4695 } 4696 4697 prev_nr_hw_queues = set->nr_hw_queues; 4698 if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) 4699 goto reregister; 4700 4701 fallback: 4702 blk_mq_update_queue_map(set); 4703 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4704 blk_mq_realloc_hw_ctxs(set, q); 4705 blk_mq_update_poll_flag(q); 4706 if (q->nr_hw_queues != set->nr_hw_queues) { 4707 int i = prev_nr_hw_queues; 4708 4709 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 4710 nr_hw_queues, prev_nr_hw_queues); 4711 for (; i < set->nr_hw_queues; i++) 4712 __blk_mq_free_map_and_rqs(set, i); 4713 4714 set->nr_hw_queues = prev_nr_hw_queues; 4715 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4716 goto fallback; 4717 } 4718 blk_mq_map_swqueue(q); 4719 } 4720 4721 reregister: 4722 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4723 blk_mq_sysfs_register_hctxs(q); 4724 blk_mq_debugfs_register_hctxs(q); 4725 } 4726 4727 switch_back: 4728 list_for_each_entry(q, &set->tag_list, tag_set_list) 4729 blk_mq_elv_switch_back(&head, q); 4730 4731 list_for_each_entry(q, &set->tag_list, tag_set_list) 4732 blk_mq_unfreeze_queue(q); 4733 } 4734 4735 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 4736 { 4737 mutex_lock(&set->tag_list_lock); 4738 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 4739 mutex_unlock(&set->tag_list_lock); 4740 } 4741 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 4742 4743 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, 4744 struct io_comp_batch *iob, unsigned int flags) 4745 { 4746 long state = get_current_state(); 4747 int ret; 4748 4749 do { 4750 ret = q->mq_ops->poll(hctx, iob); 4751 if (ret > 0) { 4752 __set_current_state(TASK_RUNNING); 4753 return ret; 4754 } 4755 4756 if (signal_pending_state(state, current)) 4757 __set_current_state(TASK_RUNNING); 4758 if (task_is_running(current)) 4759 return 1; 4760 4761 if (ret < 0 || (flags & BLK_POLL_ONESHOT)) 4762 break; 4763 cpu_relax(); 4764 } while (!need_resched()); 4765 4766 __set_current_state(TASK_RUNNING); 4767 return 0; 4768 } 4769 4770 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, 4771 struct io_comp_batch *iob, unsigned int flags) 4772 { 4773 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie); 4774 4775 return blk_hctx_poll(q, hctx, iob, flags); 4776 } 4777 4778 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob, 4779 unsigned int poll_flags) 4780 { 4781 struct request_queue *q = rq->q; 4782 int ret; 4783 4784 if (!blk_rq_is_poll(rq)) 4785 return 0; 4786 if (!percpu_ref_tryget(&q->q_usage_counter)) 4787 return 0; 4788 4789 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags); 4790 blk_queue_exit(q); 4791 4792 return ret; 4793 } 4794 EXPORT_SYMBOL_GPL(blk_rq_poll); 4795 4796 unsigned int blk_mq_rq_cpu(struct request *rq) 4797 { 4798 return rq->mq_ctx->cpu; 4799 } 4800 EXPORT_SYMBOL(blk_mq_rq_cpu); 4801 4802 void blk_mq_cancel_work_sync(struct request_queue *q) 4803 { 4804 struct blk_mq_hw_ctx *hctx; 4805 unsigned long i; 4806 4807 cancel_delayed_work_sync(&q->requeue_work); 4808 4809 queue_for_each_hw_ctx(q, hctx, i) 4810 cancel_delayed_work_sync(&hctx->run_work); 4811 } 4812 4813 static int __init blk_mq_init(void) 4814 { 4815 int i; 4816 4817 
4817 	for_each_possible_cpu(i)
4818 		init_llist_head(&per_cpu(blk_cpu_done, i));
4819 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4820 
4821 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4822 				  "block/softirq:dead", NULL,
4823 				  blk_softirq_cpu_dead);
4824 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4825 				blk_mq_hctx_notify_dead);
4826 	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4827 				blk_mq_hctx_notify_online,
4828 				blk_mq_hctx_notify_offline);
4829 	return 0;
4830 }
4831 subsys_initcall(blk_mq_init);
4832 
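/*
 * Illustrative usage sketch for the tag set and disk allocation API above:
 * a minimal single-queue driver would combine blk_mq_alloc_sq_tag_set(),
 * blk_mq_alloc_disk() and blk_mq_free_tag_set() roughly as follows. The
 * names my_mq_ops, my_dev and MY_QUEUE_DEPTH are placeholders, not kernel
 * symbols:
 *
 *	static struct blk_mq_tag_set set;
 *	struct gendisk *disk;
 *	int ret;
 *
 *	ret = blk_mq_alloc_sq_tag_set(&set, &my_mq_ops, MY_QUEUE_DEPTH, 0);
 *	if (ret)
 *		return ret;
 *
 *	disk = blk_mq_alloc_disk(&set, my_dev);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&set);
 *		return PTR_ERR(disk);
 *	}
 *
 * Teardown is the reverse: del_gendisk(), put_disk() (which drops the
 * queue owned via GD_OWNS_QUEUE) and finally blk_mq_free_tag_set().
 */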