// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);

static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
static void blk_mq_request_bypass_insert(struct request *rq,
		blk_insert_t flags);
static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			 struct io_comp_batch *iob, unsigned int flags);

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
	struct mq_inflight *mi = priv;

	if (rq->part && blk_do_io_stat(rq) &&
	    (!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

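/*
 * Usage sketch (illustrative, not part of this file): drivers typically
 * bracket queue-state changes with a freeze/unfreeze pair so that no
 * request is in flight while the update happens:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue limits or other per-queue state ...
 *	blk_mq_unfreeze_queue(q);
 */
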
/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @set: tag_set to wait on
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started on one or more of the request_queues of the tag_set.  This
 * function only waits for the quiesce on those request_queues that had
 * the quiesce flag set using blk_mq_quiesce_queue_nowait.
 */
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
	if (set->flags & BLK_MQ_F_BLOCKING)
		synchronize_srcu(set->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked.  Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	/* nothing to wait for non-mq queues */
	if (queue_is_mq(q))
		blk_mq_wait_quiesce_done(q->tag_set);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue into the state before quiescing,
 * which is done by blk_mq_quiesce_queue.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_quiesce_queue_nowait(q);
	}
	blk_mq_wait_quiesce_done(set);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset);

void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	mutex_lock(&set->tag_list_lock);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		if (!blk_queue_skip_tagset_quiesce(q))
			blk_mq_unquiesce_queue(q);
	}
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);

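/*
 * Usage sketch (illustrative): error handlers commonly quiesce dispatch
 * before touching hardware state, then unquiesce to resume.  Unlike a
 * freeze, this does not wait for outstanding requests to complete:
 *
 *	blk_mq_quiesce_queue(q);
 *	... no ->queue_rq() runs concurrently beyond this point ...
 *	blk_mq_unquiesce_queue(q);
 */
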
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

/* Set start and alloc time when the allocated request is actually used */
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
{
	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;

#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	if (blk_queue_rq_alloc_time(rq->q))
		rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
	else
		rq->alloc_time_ns = 0;
#endif
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (data->rq_flags & RQF_SCHED_TAGS) {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}
	rq->timeout = 0;

	rq->part = NULL;
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (e->type->ops.prepare_request)
			e->type->ops.prepare_request(rq);
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_add_active_requests(data->hctx, nr);
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		/*
		 * All requests use scheduler tags when an I/O scheduler is
		 * enabled for the queue.
		 */
		data->rq_flags |= RQF_SCHED_TAGS;

		/*
		 * Flush/passthrough requests are special and go directly to
		 * the dispatch list.
		 */
		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
		    !blk_op_is_passthrough(data->cmd_flags)) {
			struct elevator_mq_ops *ops = &q->elevator->type->ops;

			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);

			data->rq_flags |= RQF_USE_SCHED;
			if (ops->limit_depth)
				ops->limit_depth(data->cmd_flags, data);
		}
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_tag_busy(data->hctx);

	if (data->flags & BLK_MQ_REQ_RESERVED)
		data->rq_flags |= RQF_RESV;

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data);
		if (rq) {
			blk_mq_rq_time_init(rq, alloc_time_ns);
			return rq;
		}
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_inc_active_requests(data->hctx);
	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	return rq;
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
					    struct blk_plug *plug,
					    blk_opf_t opf,
					    blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= plug->nr_ios,
		.cached_rq	= &plug->cached_rq,
	};
	struct request *rq;

	if (blk_queue_enter(q, flags))
		return NULL;

	plug->nr_ios = 1;

	rq = __blk_mq_alloc_requests(&data);
	if (unlikely(!rq))
		blk_queue_exit(q);
	return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;

	if (rq_list_empty(plug->cached_rq)) {
		if (plug->nr_ios == 1)
			return NULL;
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (!rq)
			return NULL;
	} else {
		rq = rq_list_peek(&plug->cached_rq);
		if (!rq || rq->q != q)
			return NULL;

		if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
			return NULL;
		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
			return NULL;

		plug->cached_rq = rq_list_next(rq);
		blk_mq_rq_time_init(rq, 0);
	}

	rq->cmd_flags = opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_cached_request(q, opf, flags);
	if (!rq) {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.flags		= flags,
			.cmd_flags	= opf,
			.nr_tags	= 1,
		};
		int ret;

		ret = blk_queue_enter(q, flags);
		if (ret)
			return ERR_PTR(ret);

		rq = __blk_mq_alloc_requests(&data);
		if (!rq)
			goto out_queue_exit;
	}
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

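/*
 * Usage sketch (illustrative): allocating, executing and freeing a
 * passthrough request from driver code; the timeout value is an arbitrary
 * example:
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->timeout = 10 * HZ;
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */
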
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		goto out_queue_exit;
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (q->elevator)
		data.rq_flags |= RQF_SCHED_TAGS;
	else
		blk_mq_tag_busy(data.hctx);

	if (flags & BLK_MQ_REQ_RESERVED)
		data.rq_flags |= RQF_RESV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	if (!(data.rq_flags & RQF_SCHED_TAGS))
		blk_mq_inc_active_requests(data.hctx);
	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
	blk_mq_rq_time_init(rq, alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void blk_mq_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (rq->rq_flags & RQF_USE_SCHED) {
		q->elevator->type->ops.finish_request(rq);
		/*
		 * For postflush requests that may need to be
		 * completed twice, we should clear this flag
		 * to avoid double finish_request() on the rq.
		 */
		rq->rq_flags &= ~RQF_USE_SCHED;
	}
}

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;

	if (rq->tag != BLK_MQ_NO_TAG) {
		blk_mq_dec_active_requests(hctx);
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	}
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_finish_request(rq);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(__force unsigned long long) rq->cmd_flags);

	printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO " bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), (__force u32)req_op(req),
		blk_op_str(req_op(req)),
		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request.  Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	blk_crypto_rq_put_keyslot(req);

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	if (!req->end_io) {
		req->bio = NULL;
		req->__data_len = 0;
	}
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *	except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	/*
	 * Upper layers may call blk_crypto_evict_key() anytime after the last
	 * bio_endio().  Therefore, the keyslot must be released before that.
	 */
	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
		__blk_crypto_rq_put_keyslot(req);

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
		     !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

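/*
 * Usage sketch (illustrative): a driver completing a request piecemeal,
 * e.g. 512 bytes now and the rest later; only when no bytes remain is the
 * request itself ended:
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, 512))
 *		__blk_mq_end_request(rq, BLK_STS_OK);
 *	// else rq still has data left and stays with the driver
 */
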
static inline void blk_account_io_done(struct request *req, u64 now)
{
	trace_block_io_done(req);

	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();
	}
}

static inline void blk_account_io_start(struct request *req)
{
	trace_block_io_start(req);

	if (blk_do_io_stat(req)) {
		/*
		 * All non-passthrough requests are created from a bio with one
		 * exception: when a flush command that is part of a flush
		 * sequence generated by the state machine in blk-flush.c is
		 * cloned onto the lower device by dm-multipath we can get here
		 * without a bio.
		 */
		if (req->bio)
			req->part = req->bio->bi_bdev;
		else
			req->part = req->q->disk->part0;

		part_stat_lock();
		update_io_ticks(req->part, jiffies, false);
		part_stat_unlock();
	}
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS)
		blk_stat_add(rq, now);

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	blk_mq_finish_request(rq);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		blk_mq_finish_request(rq);

		rq_qos_done(rq->q, rq);

		/*
		 * If end_io handler returns NONE, then it still has
		 * ownership of the request.
		 */
		if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
			continue;

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

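/*
 * Usage sketch (illustrative): batching completions from a driver's
 * completion handler; my_next_completed_rq() is a hypothetical helper:
 *
 *	DEFINE_IO_COMP_BATCH(iob);
 *	struct request *rq;
 *
 *	while ((rq = my_next_completed_rq(dev)) != NULL)
 *		if (!blk_mq_add_to_batch(rq, &iob, 0,
 *					 blk_mq_end_request_batch))
 *			blk_mq_end_request(rq, BLK_STS_OK);
 *	blk_mq_end_request_batch(&iob);
 */
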
static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
		smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a request whose hctx has only one ctx mapping,
	 * or a polled request, always complete locally;
	 * it's pointless to redirect the completion.
	 */
	if ((rq->mq_hctx->nr_ctx == 1 &&
	     rq->mq_ctx->cpu == raw_smp_processor_id()) ||
	    rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

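/*
 * Usage sketch (illustrative): the typical driver completion flow.  The
 * hard irq handler calls blk_mq_complete_request(), and the driver's
 * ->complete() callback (run locally, via IPI or via softirq, as decided
 * above) finally ends the request; my_status_of() is a hypothetical
 * helper:
 *
 *	static void my_complete(struct request *rq)	// mq_ops->complete
 *	{
 *		blk_mq_end_request(rq, my_status_of(rq));
 *	}
 */
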
/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
	rq->mq_hctx->tags->rqs[rq->tag] = rq;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}
EXPORT_SYMBOL(blk_mq_start_request);

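/*
 * Usage sketch (illustrative): a minimal ->queue_rq() skeleton.  The
 * request must be started before it is handed to hardware so the timeout
 * timer is armed; my_submit_to_hw() is a hypothetical helper:
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!my_submit_to_hw(hctx->driver_data, rq))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */
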
/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues.  This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		last = NULL;
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	/*
	 * Any request allocated from sched tags can't be issued to
	 * ->queue_rqs() directly
	 */
	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	blk_account_io_start(rq);

	/*
	 * As plugging can be enabled for passthrough requests on a zoned
	 * device, directly accessing the plug instead of using blk_mq_plug()
	 * should not have any consequences.
	 */
	if (current->plug && !at_head) {
		blk_add_rq_to_plug(current->plug, rq);
		return;
	}

	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
	struct blk_rq_wait *wait = rq->end_io_data;

	wait->ret = ret;
	complete(&wait->done);
	return RQ_END_IO_NONE;
}

bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_rq_wait wait = {
		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
	};

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;

	blk_account_io_start(rq);
	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
	blk_mq_run_hw_queue(hctx, false);

	if (blk_rq_is_poll(rq)) {
		blk_rq_poll_completion(rq, &wait.done);
	} else {
		/*
		 * Prevent hang_check timer from firing at us during very long
		 * I/O
		 */
		unsigned long hang_check = sysctl_hung_task_timeout_secs;

		if (hang_check)
			while (!wait_for_completion_io_timeout(&wait.done,
					hang_check * (HZ/2)))
				;
		else
			wait_for_completion_io(&wait.done);
	}

	return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_add_tail(&rq->queuelist, &q->requeue_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	LIST_HEAD(flush_list);
	struct request *rq;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	list_splice_init(&q->flush_list, &flush_list);
	spin_unlock_irq(&q->requeue_lock);

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		/*
		 * If RQF_DONTPREP is set, the request has been started by the
		 * driver already and might have driver-specific data allocated
		 * already.  Insert it into the hctx dispatch list to avoid
		 * block layer merges for the request.
		 */
		if (rq->rq_flags & RQF_DONTPREP) {
			list_del_init(&rq->queuelist);
			blk_mq_request_bypass_insert(rq, 0);
		} else {
			list_del_init(&rq->queuelist);
			blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
		}
	}

	while (!list_empty(&flush_list)) {
		rq = list_entry(flush_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, 0);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_is_flush_data_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
}

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 *
	 * In case of queue quiesce, if one flush data request is completed,
	 * don't count it as inflight given the flush sequence is suspended,
	 * and the original flush data request is invisible to the driver,
	 * just like other pending requests because of quiesce.
	 */
	if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
				blk_is_flush_data_rq(rq) &&
				blk_mq_request_completed(rq))) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

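/*
 * Usage sketch (illustrative): a driver ->timeout() handler.  Returning
 * BLK_EH_RESET_TIMER re-arms the timer via blk_add_timer() above, while
 * BLK_EH_DONE means the handler has dealt with the request itself;
 * my_try_abort()/my_schedule_reset() are hypothetical helpers:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq)
 *	{
 *		if (my_try_abort(rq))
 *			return BLK_EH_RESET_TIMER;
 *		my_schedule_reset(rq->q);
 *		return BLK_EH_DONE;
 *	}
 */
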
struct blk_expired_data {
	bool has_timedout_rq;
	unsigned long next;
	unsigned long timeout_start;
};

static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(expired->timeout_start, deadline))
		return true;

	if (expired->next == 0)
		expired->next = deadline;
	else if (time_after(expired->next, deadline))
		expired->next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq)) {
		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else if (req_ref_put_and_test(rq)) {
		__blk_mq_free_request(rq);
	}
}

static bool blk_mq_check_expired(struct request *rq, void *priv)
{
	struct blk_expired_data *expired = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable.  If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, expired)) {
		expired->has_timedout_rq = true;
		return false;
	}
	return true;
}

static bool blk_mq_handle_expired(struct request *rq, void *priv)
{
	struct blk_expired_data *expired = priv;

	if (blk_mq_req_expired(rq, expired))
		blk_mq_rq_timed_out(rq);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_expired_data expired = {
		.timeout_start = jiffies,
	};
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	/* check if there is any timed-out request */
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
	if (expired.has_timedout_rq) {
		/*
		 * Before walking tags, we must ensure any submit started
		 * before the current time has finished.  Since the submit
		 * uses srcu or rcu, wait for a synchronization point to
		 * ensure all running submits have finished.
		 */
		blk_mq_wait_quiesce_done(q->tag_set);

		expired.next = 0;
		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
	}

	if (expired.next != 0) {
		mod_timer(&q->timeout, expired.next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer.  If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while.  Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

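/*
 * Example: with four CPUs mapped to one hctx, ctx_map has four bits.  If
 * CPUs 1 and 3 queued requests, bits 1 and 3 are set and flush_busy_ctx()
 * splices the rq_lists[type] of exactly those two software queues onto
 * the caller's dispatch list, clearing each bit as it goes.
 */
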
struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	blk_mq_inc_active_requests(rq->mq_hctx);
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag.  For shared tags, this involves hooking us into
 * the tag wakeups.  For non-shared tags, we can simply mark us needing a
 * restart.  For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
	    !(blk_mq_is_shared_tags(hctx->flags))) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
		sbq = &hctx->tags->breserved_tags;
	else
		sbq = &hctx->tags->bitmap_tags;
	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - weights of 7/8 and 1/8 are applied so that it decays exponentially
 * - 4 is taken as the factor to avoid getting a too-small (zero) result,
 *   and this factor doesn't matter much because the EWMA decays
 *   exponentially anyway
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}

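/*
 * Worked example: with weight 8 and factor 4, a busy sample updates the
 * average as ewma = (ewma * 7 + 16) / 8.  Starting from 0, consecutive
 * busy samples yield 2, 3, 4, ..., settling at 9 with integer division,
 * while each idle sample decays the value by a factor of 7/8 toward 0.
 */
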
1961 		 */
1962 		if (!blk_mq_mark_tag_wait(hctx, rq)) {
1963 			/*
1964 			 * Budgets not obtained in this function will all be
1965 			 * released together when the partial dispatch is handled
1966 			 */
1967 			if (need_budget)
1968 				blk_mq_put_dispatch_budget(rq->q, budget_token);
1969 			return PREP_DISPATCH_NO_TAG;
1970 		}
1971 	}
1972 
1973 	return PREP_DISPATCH_OK;
1974 }
1975 
1976 /* release all allocated budgets before calling blk_mq_dispatch_rq_list */
1977 static void blk_mq_release_budgets(struct request_queue *q,
1978 		struct list_head *list)
1979 {
1980 	struct request *rq;
1981 
1982 	list_for_each_entry(rq, list, queuelist) {
1983 		int budget_token = blk_mq_get_rq_budget_token(rq);
1984 
1985 		if (budget_token >= 0)
1986 			blk_mq_put_dispatch_budget(q, budget_token);
1987 	}
1988 }
1989 
1990 /*
1991  * blk_mq_commit_rqs will notify the driver, using bd->last, that there are
1992  * no more requests. (See the comment for commit_rqs in struct blk_mq_ops
1993  * for details.)
1994  * Note that we must call this explicitly in two unusual cases:
1995  * 1) we did not queue everything that was initially scheduled to queue
1996  * 2) the last attempt to queue a request failed
1997  */
1998 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
1999 			      bool from_schedule)
2000 {
2001 	if (hctx->queue->mq_ops->commit_rqs && queued) {
2002 		trace_block_unplug(hctx->queue, queued, !from_schedule);
2003 		hctx->queue->mq_ops->commit_rqs(hctx);
2004 	}
2005 }
2006 
2007 /*
2008  * Returns true if we did some work AND can potentially do more.
2009  */
2010 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
2011 			     unsigned int nr_budgets)
2012 {
2013 	enum prep_dispatch prep;
2014 	struct request_queue *q = hctx->queue;
2015 	struct request *rq;
2016 	int queued;
2017 	blk_status_t ret = BLK_STS_OK;
2018 	LIST_HEAD(zone_list);
2019 	bool needs_resource = false;
2020 
2021 	if (list_empty(list))
2022 		return false;
2023 
2024 	/*
2025 	 * Now process all the entries, sending them to the driver.
2026 	 */
2027 	queued = 0;
2028 	do {
2029 		struct blk_mq_queue_data bd;
2030 
2031 		rq = list_first_entry(list, struct request, queuelist);
2032 
2033 		WARN_ON_ONCE(hctx != rq->mq_hctx);
2034 		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2035 		if (prep != PREP_DISPATCH_OK)
2036 			break;
2037 
2038 		list_del_init(&rq->queuelist);
2039 
2040 		bd.rq = rq;
2041 		bd.last = list_empty(list);
2042 
2043 		/*
2044 		 * once the request is queued to the lld, we no longer need to
2045 		 * account for its budget
2046 		 */
2047 		if (nr_budgets)
2048 			nr_budgets--;
2049 		ret = q->mq_ops->queue_rq(hctx, &bd);
2050 		switch (ret) {
2051 		case BLK_STS_OK:
2052 			queued++;
2053 			break;
2054 		case BLK_STS_RESOURCE:
2055 			needs_resource = true;
2056 			fallthrough;
2057 		case BLK_STS_DEV_RESOURCE:
2058 			blk_mq_handle_dev_resource(rq, list);
2059 			goto out;
2060 		case BLK_STS_ZONE_RESOURCE:
2061 			/*
2062 			 * Move the request to zone_list and keep going through
2063 			 * the dispatch list to find more requests the drive can
2064 			 * accept.
2065 			 */
2066 			blk_mq_handle_zone_resource(rq, &zone_list);
2067 			needs_resource = true;
2068 			break;
2069 		default:
2070 			blk_mq_end_request(rq, ret);
2071 		}
2072 	} while (!list_empty(list));
2073 out:
2074 	if (!list_empty(&zone_list))
2075 		list_splice_tail_init(&zone_list, list);
2076 
2077 	/* If we didn't flush the entire list, we could have told the driver
2078 	 * there was more coming, but that turned out to be a lie.
2079 	 */
2080 	if (!list_empty(list) || ret != BLK_STS_OK)
2081 		blk_mq_commit_rqs(hctx, queued, false);
2082 
2083 	/*
2084 	 * Any items that need requeuing?
Stuff them into hctx->dispatch, 2085 * that is where we will continue on next queue run. 2086 */ 2087 if (!list_empty(list)) { 2088 bool needs_restart; 2089 /* For non-shared tags, the RESTART check will suffice */ 2090 bool no_tag = prep == PREP_DISPATCH_NO_TAG && 2091 ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || 2092 blk_mq_is_shared_tags(hctx->flags)); 2093 2094 if (nr_budgets) 2095 blk_mq_release_budgets(q, list); 2096 2097 spin_lock(&hctx->lock); 2098 list_splice_tail_init(list, &hctx->dispatch); 2099 spin_unlock(&hctx->lock); 2100 2101 /* 2102 * Order adding requests to hctx->dispatch and checking 2103 * SCHED_RESTART flag. The pair of this smp_mb() is the one 2104 * in blk_mq_sched_restart(). Avoid restart code path to 2105 * miss the new added requests to hctx->dispatch, meantime 2106 * SCHED_RESTART is observed here. 2107 */ 2108 smp_mb(); 2109 2110 /* 2111 * If SCHED_RESTART was set by the caller of this function and 2112 * it is no longer set that means that it was cleared by another 2113 * thread and hence that a queue rerun is needed. 2114 * 2115 * If 'no_tag' is set, that means that we failed getting 2116 * a driver tag with an I/O scheduler attached. If our dispatch 2117 * waitqueue is no longer active, ensure that we run the queue 2118 * AFTER adding our entries back to the list. 2119 * 2120 * If no I/O scheduler has been configured it is possible that 2121 * the hardware queue got stopped and restarted before requests 2122 * were pushed back onto the dispatch list. Rerun the queue to 2123 * avoid starvation. Notes: 2124 * - blk_mq_run_hw_queue() checks whether or not a queue has 2125 * been stopped before rerunning a queue. 2126 * - Some but not all block drivers stop a queue before 2127 * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq 2128 * and dm-rq. 2129 * 2130 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART 2131 * bit is set, run queue after a delay to avoid IO stalls 2132 * that could otherwise occur if the queue is idle. We'll do 2133 * similar if we couldn't get budget or couldn't lock a zone 2134 * and SCHED_RESTART is set. 2135 */ 2136 needs_restart = blk_mq_sched_needs_restart(hctx); 2137 if (prep == PREP_DISPATCH_NO_BUDGET) 2138 needs_resource = true; 2139 if (!needs_restart || 2140 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) 2141 blk_mq_run_hw_queue(hctx, true); 2142 else if (needs_resource) 2143 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); 2144 2145 blk_mq_update_dispatch_busy(hctx, true); 2146 return false; 2147 } 2148 2149 blk_mq_update_dispatch_busy(hctx, false); 2150 return true; 2151 } 2152 2153 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) 2154 { 2155 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); 2156 2157 if (cpu >= nr_cpu_ids) 2158 cpu = cpumask_first(hctx->cpumask); 2159 return cpu; 2160 } 2161 2162 /* 2163 * It'd be great if the workqueue API had a way to pass 2164 * in a mask and had some smarts for more clever placement. 2165 * For now we just round-robin here, switching for every 2166 * BLK_MQ_CPU_WORK_BATCH queued items. 
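 *
 * Illustrative example (CPU numbers assumed purely for illustration): with
 * hctx->cpumask = {2, 5, 7} and BLK_MQ_CPU_WORK_BATCH == 8, run_work is
 * queued on CPU 2 for eight queue runs, then on CPU 5 for the next eight,
 * then on CPU 7, before wrapping back to CPU 2.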
2167  */
2168 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2169 {
2170 	bool tried = false;
2171 	int next_cpu = hctx->next_cpu;
2172 
2173 	if (hctx->queue->nr_hw_queues == 1)
2174 		return WORK_CPU_UNBOUND;
2175 
2176 	if (--hctx->next_cpu_batch <= 0) {
2177 select_cpu:
2178 		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2179 				cpu_online_mask);
2180 		if (next_cpu >= nr_cpu_ids)
2181 			next_cpu = blk_mq_first_mapped_cpu(hctx);
2182 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2183 	}
2184 
2185 	/*
2186 	 * Do an unbound schedule if we can't find an online CPU for this hctx;
2187 	 * this should only happen while handling a CPU DEAD event.
2188 	 */
2189 	if (!cpu_online(next_cpu)) {
2190 		if (!tried) {
2191 			tried = true;
2192 			goto select_cpu;
2193 		}
2194 
2195 		/*
2196 		 * Make sure to re-select the CPU next time once the CPUs
2197 		 * in hctx->cpumask come back online.
2198 		 */
2199 		hctx->next_cpu = next_cpu;
2200 		hctx->next_cpu_batch = 1;
2201 		return WORK_CPU_UNBOUND;
2202 	}
2203 
2204 	hctx->next_cpu = next_cpu;
2205 	return next_cpu;
2206 }
2207 
2208 /**
2209  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2210  * @hctx: Pointer to the hardware queue to run.
2211  * @msecs: Milliseconds of delay to wait before running the queue.
2212  *
2213  * Run a hardware queue asynchronously with a delay of @msecs.
2214  */
2215 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2216 {
2217 	if (unlikely(blk_mq_hctx_stopped(hctx)))
2218 		return;
2219 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2220 				    msecs_to_jiffies(msecs));
2221 }
2222 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2223 
2224 /**
2225  * blk_mq_run_hw_queue - Start to run a hardware queue.
2226  * @hctx: Pointer to the hardware queue to run.
2227  * @async: If we want to run the queue asynchronously.
2228  *
2229  * Check if the request queue is not in a quiesced state and if there are
2230  * pending requests to be sent. If this is true, run the queue to send requests
2231  * to hardware.
2232  */
2233 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2234 {
2235 	bool need_run;
2236 
2237 	/*
2238 	 * We can't run the queue inline with interrupts disabled.
2239 	 */
2240 	WARN_ON_ONCE(!async && in_interrupt());
2241 
2242 	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
2243 
2244 	/*
2245 	 * When the queue is quiesced, we may be switching the io scheduler,
2246 	 * updating nr_hw_queues, or doing other work, and we can't run the
2247 	 * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
2248 	 *
2249 	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
2250 	 * quiesced.
2251 	 */
2252 	__blk_mq_run_dispatch_ops(hctx->queue, false,
2253 		need_run = !blk_queue_quiesced(hctx->queue) &&
2254 		blk_mq_hctx_has_pending(hctx));
2255 
2256 	if (!need_run)
2257 		return;
2258 
2259 	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2260 		blk_mq_delay_run_hw_queue(hctx, 0);
2261 		return;
2262 	}
2263 
2264 	blk_mq_run_dispatch_ops(hctx->queue,
2265 				blk_mq_sched_dispatch_requests(hctx));
2266 }
2267 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2268 
2269 /*
2270  * Return the preferred queue to dispatch from (if any) for a non-mq aware
2271  * IO scheduler.
2272 */ 2273 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) 2274 { 2275 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 2276 /* 2277 * If the IO scheduler does not respect hardware queues when 2278 * dispatching, we just don't bother with multiple HW queues and 2279 * dispatch from hctx for the current CPU since running multiple queues 2280 * just causes lock contention inside the scheduler and pointless cache 2281 * bouncing. 2282 */ 2283 struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; 2284 2285 if (!blk_mq_hctx_stopped(hctx)) 2286 return hctx; 2287 return NULL; 2288 } 2289 2290 /** 2291 * blk_mq_run_hw_queues - Run all hardware queues in a request queue. 2292 * @q: Pointer to the request queue to run. 2293 * @async: If we want to run the queue asynchronously. 2294 */ 2295 void blk_mq_run_hw_queues(struct request_queue *q, bool async) 2296 { 2297 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2298 unsigned long i; 2299 2300 sq_hctx = NULL; 2301 if (blk_queue_sq_sched(q)) 2302 sq_hctx = blk_mq_get_sq_hctx(q); 2303 queue_for_each_hw_ctx(q, hctx, i) { 2304 if (blk_mq_hctx_stopped(hctx)) 2305 continue; 2306 /* 2307 * Dispatch from this hctx either if there's no hctx preferred 2308 * by IO scheduler or if it has requests that bypass the 2309 * scheduler. 2310 */ 2311 if (!sq_hctx || sq_hctx == hctx || 2312 !list_empty_careful(&hctx->dispatch)) 2313 blk_mq_run_hw_queue(hctx, async); 2314 } 2315 } 2316 EXPORT_SYMBOL(blk_mq_run_hw_queues); 2317 2318 /** 2319 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. 2320 * @q: Pointer to the request queue to run. 2321 * @msecs: Milliseconds of delay to wait before running the queues. 2322 */ 2323 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) 2324 { 2325 struct blk_mq_hw_ctx *hctx, *sq_hctx; 2326 unsigned long i; 2327 2328 sq_hctx = NULL; 2329 if (blk_queue_sq_sched(q)) 2330 sq_hctx = blk_mq_get_sq_hctx(q); 2331 queue_for_each_hw_ctx(q, hctx, i) { 2332 if (blk_mq_hctx_stopped(hctx)) 2333 continue; 2334 /* 2335 * If there is already a run_work pending, leave the 2336 * pending delay untouched. Otherwise, a hctx can stall 2337 * if another hctx is re-delaying the other's work 2338 * before the work executes. 2339 */ 2340 if (delayed_work_pending(&hctx->run_work)) 2341 continue; 2342 /* 2343 * Dispatch from this hctx either if there's no hctx preferred 2344 * by IO scheduler or if it has requests that bypass the 2345 * scheduler. 2346 */ 2347 if (!sq_hctx || sq_hctx == hctx || 2348 !list_empty_careful(&hctx->dispatch)) 2349 blk_mq_delay_run_hw_queue(hctx, msecs); 2350 } 2351 } 2352 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); 2353 2354 /* 2355 * This function is often used for pausing .queue_rq() by driver when 2356 * there isn't enough resource or some conditions aren't satisfied, and 2357 * BLK_STS_RESOURCE is usually returned. 2358 * 2359 * We do not guarantee that dispatch can be drained or blocked 2360 * after blk_mq_stop_hw_queue() returns. Please use 2361 * blk_mq_quiesce_queue() for that requirement. 2362 */ 2363 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 2364 { 2365 cancel_delayed_work(&hctx->run_work); 2366 2367 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 2368 } 2369 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 2370 2371 /* 2372 * This function is often used for pausing .queue_rq() by driver when 2373 * there isn't enough resource or some conditions aren't satisfied, and 2374 * BLK_STS_RESOURCE is usually returned. 
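 *
 * A minimal driver-side sketch (illustrative only; submit_to_hw() is a
 * hypothetical LLD helper, not an API from this file):
 *
 *	if (submit_to_hw(hw, rq) == -EBUSY) {
 *		blk_mq_stop_hw_queues(rq->q);
 *		return BLK_STS_RESOURCE;
 *	}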
2375 * 2376 * We do not guarantee that dispatch can be drained or blocked 2377 * after blk_mq_stop_hw_queues() returns. Please use 2378 * blk_mq_quiesce_queue() for that requirement. 2379 */ 2380 void blk_mq_stop_hw_queues(struct request_queue *q) 2381 { 2382 struct blk_mq_hw_ctx *hctx; 2383 unsigned long i; 2384 2385 queue_for_each_hw_ctx(q, hctx, i) 2386 blk_mq_stop_hw_queue(hctx); 2387 } 2388 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 2389 2390 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 2391 { 2392 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2393 2394 blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); 2395 } 2396 EXPORT_SYMBOL(blk_mq_start_hw_queue); 2397 2398 void blk_mq_start_hw_queues(struct request_queue *q) 2399 { 2400 struct blk_mq_hw_ctx *hctx; 2401 unsigned long i; 2402 2403 queue_for_each_hw_ctx(q, hctx, i) 2404 blk_mq_start_hw_queue(hctx); 2405 } 2406 EXPORT_SYMBOL(blk_mq_start_hw_queues); 2407 2408 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 2409 { 2410 if (!blk_mq_hctx_stopped(hctx)) 2411 return; 2412 2413 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 2414 blk_mq_run_hw_queue(hctx, async); 2415 } 2416 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); 2417 2418 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) 2419 { 2420 struct blk_mq_hw_ctx *hctx; 2421 unsigned long i; 2422 2423 queue_for_each_hw_ctx(q, hctx, i) 2424 blk_mq_start_stopped_hw_queue(hctx, async || 2425 (hctx->flags & BLK_MQ_F_BLOCKING)); 2426 } 2427 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 2428 2429 static void blk_mq_run_work_fn(struct work_struct *work) 2430 { 2431 struct blk_mq_hw_ctx *hctx = 2432 container_of(work, struct blk_mq_hw_ctx, run_work.work); 2433 2434 blk_mq_run_dispatch_ops(hctx->queue, 2435 blk_mq_sched_dispatch_requests(hctx)); 2436 } 2437 2438 /** 2439 * blk_mq_request_bypass_insert - Insert a request at dispatch list. 2440 * @rq: Pointer to request to be inserted. 2441 * @flags: BLK_MQ_INSERT_* 2442 * 2443 * Should only be used carefully, when the caller knows we want to 2444 * bypass a potential IO scheduler on the target device. 2445 */ 2446 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) 2447 { 2448 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2449 2450 spin_lock(&hctx->lock); 2451 if (flags & BLK_MQ_INSERT_AT_HEAD) 2452 list_add(&rq->queuelist, &hctx->dispatch); 2453 else 2454 list_add_tail(&rq->queuelist, &hctx->dispatch); 2455 spin_unlock(&hctx->lock); 2456 } 2457 2458 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, 2459 struct blk_mq_ctx *ctx, struct list_head *list, 2460 bool run_queue_async) 2461 { 2462 struct request *rq; 2463 enum hctx_type type = hctx->type; 2464 2465 /* 2466 * Try to issue requests directly if the hw queue isn't busy to save an 2467 * extra enqueue & dequeue to the sw queue. 
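 * (hctx->dispatch_busy is the EWMA maintained by blk_mq_update_dispatch_busy()
 * above, so "isn't busy" means recent direct issues have been succeeding.)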
2468 	 */
2469 	if (!hctx->dispatch_busy && !run_queue_async) {
2470 		blk_mq_run_dispatch_ops(hctx->queue,
2471 				blk_mq_try_issue_list_directly(hctx, list));
2472 		if (list_empty(list))
2473 			goto out;
2474 	}
2475 
2476 	/*
2477 	 * preemption doesn't flush the plug list, so it's possible that
2478 	 * ctx->cpu is offline now
2479 	 */
2480 	list_for_each_entry(rq, list, queuelist) {
2481 		BUG_ON(rq->mq_ctx != ctx);
2482 		trace_block_rq_insert(rq);
2483 		if (rq->cmd_flags & REQ_NOWAIT)
2484 			run_queue_async = true;
2485 	}
2486 
2487 	spin_lock(&ctx->lock);
2488 	list_splice_tail_init(list, &ctx->rq_lists[type]);
2489 	blk_mq_hctx_mark_pending(hctx, ctx);
2490 	spin_unlock(&ctx->lock);
2491 out:
2492 	blk_mq_run_hw_queue(hctx, run_queue_async);
2493 }
2494 
2495 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2496 {
2497 	struct request_queue *q = rq->q;
2498 	struct blk_mq_ctx *ctx = rq->mq_ctx;
2499 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2500 
2501 	if (blk_rq_is_passthrough(rq)) {
2502 		/*
2503 		 * Passthrough requests have to be added to hctx->dispatch
2504 		 * directly. The device may be in a situation where it can't
2505 		 * handle FS requests, and always returns BLK_STS_RESOURCE for
2506 		 * them, which gets them added to hctx->dispatch.
2507 		 *
2508 		 * If a passthrough request is required to unblock the queues
2509 		 * and it is added to the scheduler queue, there is no chance
2510 		 * to dispatch it given we prioritize requests in hctx->dispatch.
2511 		 */
2512 		blk_mq_request_bypass_insert(rq, flags);
2513 	} else if (req_op(rq) == REQ_OP_FLUSH) {
2514 		/*
2515 		 * First, a normal IO request is inserted to the scheduler
2516 		 * queue or sw queue, while a flush request is added to the
2517 		 * dispatch queue (hctx->dispatch) directly. As there is at
2518 		 * most one in-flight flush request per hw queue, it doesn't
2519 		 * matter whether it goes to the tail or the front of that queue.
2520 		 *
2521 		 * Second, with NCQ a flush request is a non-NCQ command, and
2522 		 * queueing it will fail while any normal IO request (NCQ
2523 		 * command) is in flight. Adding the flush rq to the front of
2524 		 * hctx->dispatch tends to add extra latency to the flush rq
2525 		 * because of S_SCHED_RESTART, compared with adding it to the
2526 		 * tail; this increases the chance of flush merging, so fewer
2527 		 * flush requests are issued to the controller. It has been
2528 		 * observed that ~10% of time is saved in blktests block/004
2529 		 * on a disk attached to an AHCI/NCQ drive when the flush rq
2530 		 * is added to the front of hctx->dispatch.
2531 		 *
2532 		 * So simply queue the flush rq to the front of hctx->dispatch,
2533 		 * so that flush-intensive workloads can benefit on NCQ HW.
2534 */ 2535 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD); 2536 } else if (q->elevator) { 2537 LIST_HEAD(list); 2538 2539 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG); 2540 2541 list_add(&rq->queuelist, &list); 2542 q->elevator->type->ops.insert_requests(hctx, &list, flags); 2543 } else { 2544 trace_block_rq_insert(rq); 2545 2546 spin_lock(&ctx->lock); 2547 if (flags & BLK_MQ_INSERT_AT_HEAD) 2548 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); 2549 else 2550 list_add_tail(&rq->queuelist, 2551 &ctx->rq_lists[hctx->type]); 2552 blk_mq_hctx_mark_pending(hctx, ctx); 2553 spin_unlock(&ctx->lock); 2554 } 2555 } 2556 2557 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, 2558 unsigned int nr_segs) 2559 { 2560 int err; 2561 2562 if (bio->bi_opf & REQ_RAHEAD) 2563 rq->cmd_flags |= REQ_FAILFAST_MASK; 2564 2565 rq->__sector = bio->bi_iter.bi_sector; 2566 blk_rq_bio_prep(rq, bio, nr_segs); 2567 2568 /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ 2569 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); 2570 WARN_ON_ONCE(err); 2571 2572 blk_account_io_start(rq); 2573 } 2574 2575 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, 2576 struct request *rq, bool last) 2577 { 2578 struct request_queue *q = rq->q; 2579 struct blk_mq_queue_data bd = { 2580 .rq = rq, 2581 .last = last, 2582 }; 2583 blk_status_t ret; 2584 2585 /* 2586 * For OK queue, we are done. For error, caller may kill it. 2587 * Any other error (busy), just add it to our list as we 2588 * previously would have done. 2589 */ 2590 ret = q->mq_ops->queue_rq(hctx, &bd); 2591 switch (ret) { 2592 case BLK_STS_OK: 2593 blk_mq_update_dispatch_busy(hctx, false); 2594 break; 2595 case BLK_STS_RESOURCE: 2596 case BLK_STS_DEV_RESOURCE: 2597 blk_mq_update_dispatch_busy(hctx, true); 2598 __blk_mq_requeue_request(rq); 2599 break; 2600 default: 2601 blk_mq_update_dispatch_busy(hctx, false); 2602 break; 2603 } 2604 2605 return ret; 2606 } 2607 2608 static bool blk_mq_get_budget_and_tag(struct request *rq) 2609 { 2610 int budget_token; 2611 2612 budget_token = blk_mq_get_dispatch_budget(rq->q); 2613 if (budget_token < 0) 2614 return false; 2615 blk_mq_set_rq_budget_token(rq, budget_token); 2616 if (!blk_mq_get_driver_tag(rq)) { 2617 blk_mq_put_dispatch_budget(rq->q, budget_token); 2618 return false; 2619 } 2620 return true; 2621 } 2622 2623 /** 2624 * blk_mq_try_issue_directly - Try to send a request directly to device driver. 2625 * @hctx: Pointer of the associated hardware queue. 2626 * @rq: Pointer to request to be sent. 2627 * 2628 * If the device has enough resources to accept a new request now, send the 2629 * request directly to device driver. Else, insert at hctx->dispatch queue, so 2630 * we can try send it another time in the future. Requests inserted at this 2631 * queue have higher priority. 
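 *
 * Rough decision flow (informal summary of the code below): stopped or
 * quiesced -> insert; RQF_USE_SCHED or no budget/tag -> insert and run
 * the queue; otherwise issue to the driver, falling back to
 * hctx->dispatch on BLK_STS_RESOURCE / BLK_STS_DEV_RESOURCE.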
2632 */ 2633 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 2634 struct request *rq) 2635 { 2636 blk_status_t ret; 2637 2638 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { 2639 blk_mq_insert_request(rq, 0); 2640 return; 2641 } 2642 2643 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) { 2644 blk_mq_insert_request(rq, 0); 2645 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT); 2646 return; 2647 } 2648 2649 ret = __blk_mq_issue_directly(hctx, rq, true); 2650 switch (ret) { 2651 case BLK_STS_OK: 2652 break; 2653 case BLK_STS_RESOURCE: 2654 case BLK_STS_DEV_RESOURCE: 2655 blk_mq_request_bypass_insert(rq, 0); 2656 blk_mq_run_hw_queue(hctx, false); 2657 break; 2658 default: 2659 blk_mq_end_request(rq, ret); 2660 break; 2661 } 2662 } 2663 2664 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) 2665 { 2666 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 2667 2668 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { 2669 blk_mq_insert_request(rq, 0); 2670 return BLK_STS_OK; 2671 } 2672 2673 if (!blk_mq_get_budget_and_tag(rq)) 2674 return BLK_STS_RESOURCE; 2675 return __blk_mq_issue_directly(hctx, rq, last); 2676 } 2677 2678 static void blk_mq_plug_issue_direct(struct blk_plug *plug) 2679 { 2680 struct blk_mq_hw_ctx *hctx = NULL; 2681 struct request *rq; 2682 int queued = 0; 2683 blk_status_t ret = BLK_STS_OK; 2684 2685 while ((rq = rq_list_pop(&plug->mq_list))) { 2686 bool last = rq_list_empty(plug->mq_list); 2687 2688 if (hctx != rq->mq_hctx) { 2689 if (hctx) { 2690 blk_mq_commit_rqs(hctx, queued, false); 2691 queued = 0; 2692 } 2693 hctx = rq->mq_hctx; 2694 } 2695 2696 ret = blk_mq_request_issue_directly(rq, last); 2697 switch (ret) { 2698 case BLK_STS_OK: 2699 queued++; 2700 break; 2701 case BLK_STS_RESOURCE: 2702 case BLK_STS_DEV_RESOURCE: 2703 blk_mq_request_bypass_insert(rq, 0); 2704 blk_mq_run_hw_queue(hctx, false); 2705 goto out; 2706 default: 2707 blk_mq_end_request(rq, ret); 2708 break; 2709 } 2710 } 2711 2712 out: 2713 if (ret != BLK_STS_OK) 2714 blk_mq_commit_rqs(hctx, queued, false); 2715 } 2716 2717 static void __blk_mq_flush_plug_list(struct request_queue *q, 2718 struct blk_plug *plug) 2719 { 2720 if (blk_queue_quiesced(q)) 2721 return; 2722 q->mq_ops->queue_rqs(&plug->mq_list); 2723 } 2724 2725 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) 2726 { 2727 struct blk_mq_hw_ctx *this_hctx = NULL; 2728 struct blk_mq_ctx *this_ctx = NULL; 2729 struct request *requeue_list = NULL; 2730 struct request **requeue_lastp = &requeue_list; 2731 unsigned int depth = 0; 2732 bool is_passthrough = false; 2733 LIST_HEAD(list); 2734 2735 do { 2736 struct request *rq = rq_list_pop(&plug->mq_list); 2737 2738 if (!this_hctx) { 2739 this_hctx = rq->mq_hctx; 2740 this_ctx = rq->mq_ctx; 2741 is_passthrough = blk_rq_is_passthrough(rq); 2742 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx || 2743 is_passthrough != blk_rq_is_passthrough(rq)) { 2744 rq_list_add_tail(&requeue_lastp, rq); 2745 continue; 2746 } 2747 list_add(&rq->queuelist, &list); 2748 depth++; 2749 } while (!rq_list_empty(plug->mq_list)); 2750 2751 plug->mq_list = requeue_list; 2752 trace_block_unplug(this_hctx->queue, depth, !from_sched); 2753 2754 percpu_ref_get(&this_hctx->queue->q_usage_counter); 2755 /* passthrough requests should never be issued to the I/O scheduler */ 2756 if (is_passthrough) { 2757 spin_lock(&this_hctx->lock); 2758 list_splice_tail_init(&list, &this_hctx->dispatch); 2759 
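		/* the requests now live on hctx->dispatch; drop the lock before running the queue */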
spin_unlock(&this_hctx->lock); 2760 blk_mq_run_hw_queue(this_hctx, from_sched); 2761 } else if (this_hctx->queue->elevator) { 2762 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx, 2763 &list, 0); 2764 blk_mq_run_hw_queue(this_hctx, from_sched); 2765 } else { 2766 blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched); 2767 } 2768 percpu_ref_put(&this_hctx->queue->q_usage_counter); 2769 } 2770 2771 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) 2772 { 2773 struct request *rq; 2774 2775 /* 2776 * We may have been called recursively midway through handling 2777 * plug->mq_list via a schedule() in the driver's queue_rq() callback. 2778 * To avoid mq_list changing under our feet, clear rq_count early and 2779 * bail out specifically if rq_count is 0 rather than checking 2780 * whether the mq_list is empty. 2781 */ 2782 if (plug->rq_count == 0) 2783 return; 2784 plug->rq_count = 0; 2785 2786 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { 2787 struct request_queue *q; 2788 2789 rq = rq_list_peek(&plug->mq_list); 2790 q = rq->q; 2791 2792 /* 2793 * Peek first request and see if we have a ->queue_rqs() hook. 2794 * If we do, we can dispatch the whole plug list in one go. We 2795 * already know at this point that all requests belong to the 2796 * same queue, caller must ensure that's the case. 2797 */ 2798 if (q->mq_ops->queue_rqs) { 2799 blk_mq_run_dispatch_ops(q, 2800 __blk_mq_flush_plug_list(q, plug)); 2801 if (rq_list_empty(plug->mq_list)) 2802 return; 2803 } 2804 2805 blk_mq_run_dispatch_ops(q, 2806 blk_mq_plug_issue_direct(plug)); 2807 if (rq_list_empty(plug->mq_list)) 2808 return; 2809 } 2810 2811 do { 2812 blk_mq_dispatch_plug_list(plug, from_schedule); 2813 } while (!rq_list_empty(plug->mq_list)); 2814 } 2815 2816 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, 2817 struct list_head *list) 2818 { 2819 int queued = 0; 2820 blk_status_t ret = BLK_STS_OK; 2821 2822 while (!list_empty(list)) { 2823 struct request *rq = list_first_entry(list, struct request, 2824 queuelist); 2825 2826 list_del_init(&rq->queuelist); 2827 ret = blk_mq_request_issue_directly(rq, list_empty(list)); 2828 switch (ret) { 2829 case BLK_STS_OK: 2830 queued++; 2831 break; 2832 case BLK_STS_RESOURCE: 2833 case BLK_STS_DEV_RESOURCE: 2834 blk_mq_request_bypass_insert(rq, 0); 2835 if (list_empty(list)) 2836 blk_mq_run_hw_queue(hctx, false); 2837 goto out; 2838 default: 2839 blk_mq_end_request(rq, ret); 2840 break; 2841 } 2842 } 2843 2844 out: 2845 if (ret != BLK_STS_OK) 2846 blk_mq_commit_rqs(hctx, queued, false); 2847 } 2848 2849 static bool blk_mq_attempt_bio_merge(struct request_queue *q, 2850 struct bio *bio, unsigned int nr_segs) 2851 { 2852 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { 2853 if (blk_attempt_plug_merge(q, bio, nr_segs)) 2854 return true; 2855 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) 2856 return true; 2857 } 2858 return false; 2859 } 2860 2861 static struct request *blk_mq_get_new_requests(struct request_queue *q, 2862 struct blk_plug *plug, 2863 struct bio *bio, 2864 unsigned int nsegs) 2865 { 2866 struct blk_mq_alloc_data data = { 2867 .q = q, 2868 .nr_tags = 1, 2869 .cmd_flags = bio->bi_opf, 2870 }; 2871 struct request *rq; 2872 2873 if (blk_mq_attempt_bio_merge(q, bio, nsegs)) 2874 return NULL; 2875 2876 rq_qos_throttle(q, bio); 2877 2878 if (plug) { 2879 data.nr_tags = plug->nr_ios; 2880 plug->nr_ios = 1; 2881 data.cached_rq = &plug->cached_rq; 2882 } 2883 2884 rq = __blk_mq_alloc_requests(&data); 
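	/* on success, hand the request back; otherwise unwind the qos state and fail REQ_NOWAIT bios below */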
2885 if (rq) 2886 return rq; 2887 rq_qos_cleanup(q, bio); 2888 if (bio->bi_opf & REQ_NOWAIT) 2889 bio_wouldblock_error(bio); 2890 return NULL; 2891 } 2892 2893 /* return true if this @rq can be used for @bio */ 2894 static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug, 2895 struct bio *bio) 2896 { 2897 enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf); 2898 enum hctx_type hctx_type = rq->mq_hctx->type; 2899 2900 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); 2901 2902 if (type != hctx_type && 2903 !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT)) 2904 return false; 2905 if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf)) 2906 return false; 2907 2908 /* 2909 * If any qos ->throttle() end up blocking, we will have flushed the 2910 * plug and hence killed the cached_rq list as well. Pop this entry 2911 * before we throttle. 2912 */ 2913 plug->cached_rq = rq_list_next(rq); 2914 rq_qos_throttle(rq->q, bio); 2915 2916 blk_mq_rq_time_init(rq, 0); 2917 rq->cmd_flags = bio->bi_opf; 2918 INIT_LIST_HEAD(&rq->queuelist); 2919 return true; 2920 } 2921 2922 static void bio_set_ioprio(struct bio *bio) 2923 { 2924 /* Nobody set ioprio so far? Initialize it based on task's nice value */ 2925 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE) 2926 bio->bi_ioprio = get_current_ioprio(); 2927 blkcg_set_ioprio(bio); 2928 } 2929 2930 /** 2931 * blk_mq_submit_bio - Create and send a request to block device. 2932 * @bio: Bio pointer. 2933 * 2934 * Builds up a request structure from @q and @bio and send to the device. The 2935 * request may not be queued directly to hardware if: 2936 * * This request can be merged with another one 2937 * * We want to place request at plug queue for possible future merging 2938 * * There is an IO scheduler active at this queue 2939 * 2940 * It will not queue the request if there is an error with the bio, or at the 2941 * request creation. 
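 *
 * Illustrative caller-side sketch (an assumption for illustration, not
 * code from this file); a bio normally reaches this function via
 * submit_bio():
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);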
2942 */ 2943 void blk_mq_submit_bio(struct bio *bio) 2944 { 2945 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2946 struct blk_plug *plug = blk_mq_plug(bio); 2947 const int is_sync = op_is_sync(bio->bi_opf); 2948 struct blk_mq_hw_ctx *hctx; 2949 struct request *rq = NULL; 2950 unsigned int nr_segs = 1; 2951 blk_status_t ret; 2952 2953 bio = blk_queue_bounce(bio, q); 2954 if (bio_may_exceed_limits(bio, &q->limits)) { 2955 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); 2956 if (!bio) 2957 return; 2958 } 2959 2960 bio_set_ioprio(bio); 2961 2962 if (plug) { 2963 rq = rq_list_peek(&plug->cached_rq); 2964 if (rq && rq->q != q) 2965 rq = NULL; 2966 } 2967 if (rq) { 2968 if (!bio_integrity_prep(bio)) 2969 return; 2970 if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) 2971 return; 2972 if (blk_mq_can_use_cached_rq(rq, plug, bio)) 2973 goto done; 2974 percpu_ref_get(&q->q_usage_counter); 2975 } else { 2976 if (unlikely(bio_queue_enter(bio))) 2977 return; 2978 if (!bio_integrity_prep(bio)) 2979 goto fail; 2980 } 2981 2982 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); 2983 if (unlikely(!rq)) { 2984 fail: 2985 blk_queue_exit(q); 2986 return; 2987 } 2988 2989 done: 2990 trace_block_getrq(bio); 2991 2992 rq_qos_track(q, rq, bio); 2993 2994 blk_mq_bio_to_request(rq, bio, nr_segs); 2995 2996 ret = blk_crypto_rq_get_keyslot(rq); 2997 if (ret != BLK_STS_OK) { 2998 bio->bi_status = ret; 2999 bio_endio(bio); 3000 blk_mq_free_request(rq); 3001 return; 3002 } 3003 3004 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) 3005 return; 3006 3007 if (plug) { 3008 blk_add_rq_to_plug(plug, rq); 3009 return; 3010 } 3011 3012 hctx = rq->mq_hctx; 3013 if ((rq->rq_flags & RQF_USE_SCHED) || 3014 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { 3015 blk_mq_insert_request(rq, 0); 3016 blk_mq_run_hw_queue(hctx, true); 3017 } else { 3018 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); 3019 } 3020 } 3021 3022 #ifdef CONFIG_BLK_MQ_STACKING 3023 /** 3024 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 3025 * @rq: the request being queued 3026 */ 3027 blk_status_t blk_insert_cloned_request(struct request *rq) 3028 { 3029 struct request_queue *q = rq->q; 3030 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); 3031 unsigned int max_segments = blk_rq_get_max_segments(rq); 3032 blk_status_t ret; 3033 3034 if (blk_rq_sectors(rq) > max_sectors) { 3035 /* 3036 * SCSI device does not have a good way to return if 3037 * Write Same/Zero is actually supported. If a device rejects 3038 * a non-read/write command (discard, write same,etc.) the 3039 * low-level device driver will set the relevant queue limit to 3040 * 0 to prevent blk-lib from issuing more of the offending 3041 * operations. Commands queued prior to the queue limit being 3042 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O 3043 * errors being propagated to upper layers. 3044 */ 3045 if (max_sectors == 0) 3046 return BLK_STS_NOTSUPP; 3047 3048 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", 3049 __func__, blk_rq_sectors(rq), max_sectors); 3050 return BLK_STS_IOERR; 3051 } 3052 3053 /* 3054 * The queue settings related to segment counting may differ from the 3055 * original queue. 3056 */ 3057 rq->nr_phys_segments = blk_recalc_rq_segments(rq); 3058 if (rq->nr_phys_segments > max_segments) { 3059 printk(KERN_ERR "%s: over max segments limit. 
(%u > %u)\n", 3060 __func__, rq->nr_phys_segments, max_segments); 3061 return BLK_STS_IOERR; 3062 } 3063 3064 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) 3065 return BLK_STS_IOERR; 3066 3067 ret = blk_crypto_rq_get_keyslot(rq); 3068 if (ret != BLK_STS_OK) 3069 return ret; 3070 3071 blk_account_io_start(rq); 3072 3073 /* 3074 * Since we have a scheduler attached on the top device, 3075 * bypass a potential scheduler on the bottom device for 3076 * insert. 3077 */ 3078 blk_mq_run_dispatch_ops(q, 3079 ret = blk_mq_request_issue_directly(rq, true)); 3080 if (ret) 3081 blk_account_io_done(rq, ktime_get_ns()); 3082 return ret; 3083 } 3084 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 3085 3086 /** 3087 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 3088 * @rq: the clone request to be cleaned up 3089 * 3090 * Description: 3091 * Free all bios in @rq for a cloned request. 3092 */ 3093 void blk_rq_unprep_clone(struct request *rq) 3094 { 3095 struct bio *bio; 3096 3097 while ((bio = rq->bio) != NULL) { 3098 rq->bio = bio->bi_next; 3099 3100 bio_put(bio); 3101 } 3102 } 3103 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 3104 3105 /** 3106 * blk_rq_prep_clone - Helper function to setup clone request 3107 * @rq: the request to be setup 3108 * @rq_src: original request to be cloned 3109 * @bs: bio_set that bios for clone are allocated from 3110 * @gfp_mask: memory allocation mask for bio 3111 * @bio_ctr: setup function to be called for each clone bio. 3112 * Returns %0 for success, non %0 for failure. 3113 * @data: private data to be passed to @bio_ctr 3114 * 3115 * Description: 3116 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 3117 * Also, pages which the original bios are pointing to are not copied 3118 * and the cloned bios just point same pages. 3119 * So cloned bios must be completed before original bios, which means 3120 * the caller must complete @rq before @rq_src. 3121 */ 3122 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 3123 struct bio_set *bs, gfp_t gfp_mask, 3124 int (*bio_ctr)(struct bio *, struct bio *, void *), 3125 void *data) 3126 { 3127 struct bio *bio, *bio_src; 3128 3129 if (!bs) 3130 bs = &fs_bio_set; 3131 3132 __rq_for_each_bio(bio_src, rq_src) { 3133 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, 3134 bs); 3135 if (!bio) 3136 goto free_and_out; 3137 3138 if (bio_ctr && bio_ctr(bio, bio_src, data)) 3139 goto free_and_out; 3140 3141 if (rq->bio) { 3142 rq->biotail->bi_next = bio; 3143 rq->biotail = bio; 3144 } else { 3145 rq->bio = rq->biotail = bio; 3146 } 3147 bio = NULL; 3148 } 3149 3150 /* Copy attributes of the original request to the clone request. */ 3151 rq->__sector = blk_rq_pos(rq_src); 3152 rq->__data_len = blk_rq_bytes(rq_src); 3153 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { 3154 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 3155 rq->special_vec = rq_src->special_vec; 3156 } 3157 rq->nr_phys_segments = rq_src->nr_phys_segments; 3158 rq->ioprio = rq_src->ioprio; 3159 3160 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) 3161 goto free_and_out; 3162 3163 return 0; 3164 3165 free_and_out: 3166 if (bio) 3167 bio_put(bio); 3168 blk_rq_unprep_clone(rq); 3169 3170 return -ENOMEM; 3171 } 3172 EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 3173 #endif /* CONFIG_BLK_MQ_STACKING */ 3174 3175 /* 3176 * Steal bios from a request and add them to a bio list. 3177 * The request must not have been partially completed before. 
3178 */ 3179 void blk_steal_bios(struct bio_list *list, struct request *rq) 3180 { 3181 if (rq->bio) { 3182 if (list->tail) 3183 list->tail->bi_next = rq->bio; 3184 else 3185 list->head = rq->bio; 3186 list->tail = rq->biotail; 3187 3188 rq->bio = NULL; 3189 rq->biotail = NULL; 3190 } 3191 3192 rq->__data_len = 0; 3193 } 3194 EXPORT_SYMBOL_GPL(blk_steal_bios); 3195 3196 static size_t order_to_size(unsigned int order) 3197 { 3198 return (size_t)PAGE_SIZE << order; 3199 } 3200 3201 /* called before freeing request pool in @tags */ 3202 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, 3203 struct blk_mq_tags *tags) 3204 { 3205 struct page *page; 3206 unsigned long flags; 3207 3208 /* 3209 * There is no need to clear mapping if driver tags is not initialized 3210 * or the mapping belongs to the driver tags. 3211 */ 3212 if (!drv_tags || drv_tags == tags) 3213 return; 3214 3215 list_for_each_entry(page, &tags->page_list, lru) { 3216 unsigned long start = (unsigned long)page_address(page); 3217 unsigned long end = start + order_to_size(page->private); 3218 int i; 3219 3220 for (i = 0; i < drv_tags->nr_tags; i++) { 3221 struct request *rq = drv_tags->rqs[i]; 3222 unsigned long rq_addr = (unsigned long)rq; 3223 3224 if (rq_addr >= start && rq_addr < end) { 3225 WARN_ON_ONCE(req_ref_read(rq) != 0); 3226 cmpxchg(&drv_tags->rqs[i], rq, NULL); 3227 } 3228 } 3229 } 3230 3231 /* 3232 * Wait until all pending iteration is done. 3233 * 3234 * Request reference is cleared and it is guaranteed to be observed 3235 * after the ->lock is released. 3236 */ 3237 spin_lock_irqsave(&drv_tags->lock, flags); 3238 spin_unlock_irqrestore(&drv_tags->lock, flags); 3239 } 3240 3241 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, 3242 unsigned int hctx_idx) 3243 { 3244 struct blk_mq_tags *drv_tags; 3245 struct page *page; 3246 3247 if (list_empty(&tags->page_list)) 3248 return; 3249 3250 if (blk_mq_is_shared_tags(set->flags)) 3251 drv_tags = set->shared_tags; 3252 else 3253 drv_tags = set->tags[hctx_idx]; 3254 3255 if (tags->static_rqs && set->ops->exit_request) { 3256 int i; 3257 3258 for (i = 0; i < tags->nr_tags; i++) { 3259 struct request *rq = tags->static_rqs[i]; 3260 3261 if (!rq) 3262 continue; 3263 set->ops->exit_request(set, rq, hctx_idx); 3264 tags->static_rqs[i] = NULL; 3265 } 3266 } 3267 3268 blk_mq_clear_rq_mapping(drv_tags, tags); 3269 3270 while (!list_empty(&tags->page_list)) { 3271 page = list_first_entry(&tags->page_list, struct page, lru); 3272 list_del_init(&page->lru); 3273 /* 3274 * Remove kmemleak object previously allocated in 3275 * blk_mq_alloc_rqs(). 
3276 */ 3277 kmemleak_free(page_address(page)); 3278 __free_pages(page, page->private); 3279 } 3280 } 3281 3282 void blk_mq_free_rq_map(struct blk_mq_tags *tags) 3283 { 3284 kfree(tags->rqs); 3285 tags->rqs = NULL; 3286 kfree(tags->static_rqs); 3287 tags->static_rqs = NULL; 3288 3289 blk_mq_free_tags(tags); 3290 } 3291 3292 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, 3293 unsigned int hctx_idx) 3294 { 3295 int i; 3296 3297 for (i = 0; i < set->nr_maps; i++) { 3298 unsigned int start = set->map[i].queue_offset; 3299 unsigned int end = start + set->map[i].nr_queues; 3300 3301 if (hctx_idx >= start && hctx_idx < end) 3302 break; 3303 } 3304 3305 if (i >= set->nr_maps) 3306 i = HCTX_TYPE_DEFAULT; 3307 3308 return i; 3309 } 3310 3311 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, 3312 unsigned int hctx_idx) 3313 { 3314 enum hctx_type type = hctx_idx_to_type(set, hctx_idx); 3315 3316 return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); 3317 } 3318 3319 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, 3320 unsigned int hctx_idx, 3321 unsigned int nr_tags, 3322 unsigned int reserved_tags) 3323 { 3324 int node = blk_mq_get_hctx_node(set, hctx_idx); 3325 struct blk_mq_tags *tags; 3326 3327 if (node == NUMA_NO_NODE) 3328 node = set->numa_node; 3329 3330 tags = blk_mq_init_tags(nr_tags, reserved_tags, node, 3331 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); 3332 if (!tags) 3333 return NULL; 3334 3335 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3336 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3337 node); 3338 if (!tags->rqs) 3339 goto err_free_tags; 3340 3341 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), 3342 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, 3343 node); 3344 if (!tags->static_rqs) 3345 goto err_free_rqs; 3346 3347 return tags; 3348 3349 err_free_rqs: 3350 kfree(tags->rqs); 3351 err_free_tags: 3352 blk_mq_free_tags(tags); 3353 return NULL; 3354 } 3355 3356 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 3357 unsigned int hctx_idx, int node) 3358 { 3359 int ret; 3360 3361 if (set->ops->init_request) { 3362 ret = set->ops->init_request(set, rq, hctx_idx, node); 3363 if (ret) 3364 return ret; 3365 } 3366 3367 WRITE_ONCE(rq->state, MQ_RQ_IDLE); 3368 return 0; 3369 } 3370 3371 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, 3372 struct blk_mq_tags *tags, 3373 unsigned int hctx_idx, unsigned int depth) 3374 { 3375 unsigned int i, j, entries_per_page, max_order = 4; 3376 int node = blk_mq_get_hctx_node(set, hctx_idx); 3377 size_t rq_size, left; 3378 3379 if (node == NUMA_NO_NODE) 3380 node = set->numa_node; 3381 3382 INIT_LIST_HEAD(&tags->page_list); 3383 3384 /* 3385 * rq_size is the size of the request plus driver payload, rounded 3386 * to the cacheline size 3387 */ 3388 rq_size = round_up(sizeof(struct request) + set->cmd_size, 3389 cache_line_size()); 3390 left = rq_size * depth; 3391 3392 for (i = 0; i < depth; ) { 3393 int this_order = max_order; 3394 struct page *page; 3395 int to_do; 3396 void *p; 3397 3398 while (this_order && left < order_to_size(this_order - 1)) 3399 this_order--; 3400 3401 do { 3402 page = alloc_pages_node(node, 3403 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, 3404 this_order); 3405 if (page) 3406 break; 3407 if (!this_order--) 3408 break; 3409 if (order_to_size(this_order) < rq_size) 3410 break; 3411 } while (1); 3412 3413 if (!page) 3414 goto fail; 3415 3416 page->private = this_order; 3417 list_add_tail(&page->lru, 
&tags->page_list); 3418 3419 p = page_address(page); 3420 /* 3421 * Allow kmemleak to scan these pages as they contain pointers 3422 * to additional allocations like via ops->init_request(). 3423 */ 3424 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); 3425 entries_per_page = order_to_size(this_order) / rq_size; 3426 to_do = min(entries_per_page, depth - i); 3427 left -= to_do * rq_size; 3428 for (j = 0; j < to_do; j++) { 3429 struct request *rq = p; 3430 3431 tags->static_rqs[i] = rq; 3432 if (blk_mq_init_request(set, rq, hctx_idx, node)) { 3433 tags->static_rqs[i] = NULL; 3434 goto fail; 3435 } 3436 3437 p += rq_size; 3438 i++; 3439 } 3440 } 3441 return 0; 3442 3443 fail: 3444 blk_mq_free_rqs(set, tags, hctx_idx); 3445 return -ENOMEM; 3446 } 3447 3448 struct rq_iter_data { 3449 struct blk_mq_hw_ctx *hctx; 3450 bool has_rq; 3451 }; 3452 3453 static bool blk_mq_has_request(struct request *rq, void *data) 3454 { 3455 struct rq_iter_data *iter_data = data; 3456 3457 if (rq->mq_hctx != iter_data->hctx) 3458 return true; 3459 iter_data->has_rq = true; 3460 return false; 3461 } 3462 3463 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) 3464 { 3465 struct blk_mq_tags *tags = hctx->sched_tags ? 3466 hctx->sched_tags : hctx->tags; 3467 struct rq_iter_data data = { 3468 .hctx = hctx, 3469 }; 3470 3471 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); 3472 return data.has_rq; 3473 } 3474 3475 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, 3476 struct blk_mq_hw_ctx *hctx) 3477 { 3478 if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) 3479 return false; 3480 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) 3481 return false; 3482 return true; 3483 } 3484 3485 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) 3486 { 3487 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3488 struct blk_mq_hw_ctx, cpuhp_online); 3489 3490 if (!cpumask_test_cpu(cpu, hctx->cpumask) || 3491 !blk_mq_last_cpu_in_hctx(cpu, hctx)) 3492 return 0; 3493 3494 /* 3495 * Prevent new request from being allocated on the current hctx. 3496 * 3497 * The smp_mb__after_atomic() Pairs with the implied barrier in 3498 * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is 3499 * seen once we return from the tag allocator. 3500 */ 3501 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3502 smp_mb__after_atomic(); 3503 3504 /* 3505 * Try to grab a reference to the queue and wait for any outstanding 3506 * requests. If we could not grab a reference the queue has been 3507 * frozen and there are no requests. 3508 */ 3509 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { 3510 while (blk_mq_hctx_has_requests(hctx)) 3511 msleep(5); 3512 percpu_ref_put(&hctx->queue->q_usage_counter); 3513 } 3514 3515 return 0; 3516 } 3517 3518 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) 3519 { 3520 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, 3521 struct blk_mq_hw_ctx, cpuhp_online); 3522 3523 if (cpumask_test_cpu(cpu, hctx->cpumask)) 3524 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); 3525 return 0; 3526 } 3527 3528 /* 3529 * 'cpu' is going away. splice any existing rq_list entries from this 3530 * software queue to the hw queue dispatch list, and ensure that it 3531 * gets run. 
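 *
 * (The matching notifier registration happens in blk_mq_init_hctx()
 * via cpuhp_state_add_instance_nocalls(), further below.)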
3532  */
3533 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3534 {
3535 	struct blk_mq_hw_ctx *hctx;
3536 	struct blk_mq_ctx *ctx;
3537 	LIST_HEAD(tmp);
3538 	enum hctx_type type;
3539 
3540 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3541 	if (!cpumask_test_cpu(cpu, hctx->cpumask))
3542 		return 0;
3543 
3544 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3545 	type = hctx->type;
3546 
3547 	spin_lock(&ctx->lock);
3548 	if (!list_empty(&ctx->rq_lists[type])) {
3549 		list_splice_init(&ctx->rq_lists[type], &tmp);
3550 		blk_mq_hctx_clear_pending(hctx, ctx);
3551 	}
3552 	spin_unlock(&ctx->lock);
3553 
3554 	if (list_empty(&tmp))
3555 		return 0;
3556 
3557 	spin_lock(&hctx->lock);
3558 	list_splice_tail_init(&tmp, &hctx->dispatch);
3559 	spin_unlock(&hctx->lock);
3560 
3561 	blk_mq_run_hw_queue(hctx, true);
3562 	return 0;
3563 }
3564 
3565 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3566 {
3567 	if (!(hctx->flags & BLK_MQ_F_STACKING))
3568 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3569 						    &hctx->cpuhp_online);
3570 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3571 					    &hctx->cpuhp_dead);
3572 }
3573 
3574 /*
3575  * Before freeing the hw queue, clear the flush request reference in
3576  * tags->rqs[] to avoid a potential use-after-free.
3577  */
3578 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3579 		unsigned int queue_depth, struct request *flush_rq)
3580 {
3581 	int i;
3582 	unsigned long flags;
3583 
3584 	/* The hw queue may not be mapped yet */
3585 	if (!tags)
3586 		return;
3587 
3588 	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3589 
3590 	for (i = 0; i < queue_depth; i++)
3591 		cmpxchg(&tags->rqs[i], flush_rq, NULL);
3592 
3593 	/*
3594 	 * Wait until all pending iterations are done.
3595 	 *
3596 	 * The request reference has been cleared, and the clearing is
3597 	 * guaranteed to be observed after the ->lock is released.
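 *
 * The empty lock/unlock cycle below acts as a barrier: any iterator
 * that grabbed ->lock before this point has finished by the time the
 * lock is released again, and later iterators will observe the
 * cleared entries.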
3598 */ 3599 spin_lock_irqsave(&tags->lock, flags); 3600 spin_unlock_irqrestore(&tags->lock, flags); 3601 } 3602 3603 /* hctx->ctxs will be freed in queue's release handler */ 3604 static void blk_mq_exit_hctx(struct request_queue *q, 3605 struct blk_mq_tag_set *set, 3606 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 3607 { 3608 struct request *flush_rq = hctx->fq->flush_rq; 3609 3610 if (blk_mq_hw_queue_mapped(hctx)) 3611 blk_mq_tag_idle(hctx); 3612 3613 if (blk_queue_init_done(q)) 3614 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], 3615 set->queue_depth, flush_rq); 3616 if (set->ops->exit_request) 3617 set->ops->exit_request(set, flush_rq, hctx_idx); 3618 3619 if (set->ops->exit_hctx) 3620 set->ops->exit_hctx(hctx, hctx_idx); 3621 3622 blk_mq_remove_cpuhp(hctx); 3623 3624 xa_erase(&q->hctx_table, hctx_idx); 3625 3626 spin_lock(&q->unused_hctx_lock); 3627 list_add(&hctx->hctx_list, &q->unused_hctx_list); 3628 spin_unlock(&q->unused_hctx_lock); 3629 } 3630 3631 static void blk_mq_exit_hw_queues(struct request_queue *q, 3632 struct blk_mq_tag_set *set, int nr_queue) 3633 { 3634 struct blk_mq_hw_ctx *hctx; 3635 unsigned long i; 3636 3637 queue_for_each_hw_ctx(q, hctx, i) { 3638 if (i == nr_queue) 3639 break; 3640 blk_mq_exit_hctx(q, set, hctx, i); 3641 } 3642 } 3643 3644 static int blk_mq_init_hctx(struct request_queue *q, 3645 struct blk_mq_tag_set *set, 3646 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 3647 { 3648 hctx->queue_num = hctx_idx; 3649 3650 if (!(hctx->flags & BLK_MQ_F_STACKING)) 3651 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, 3652 &hctx->cpuhp_online); 3653 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); 3654 3655 hctx->tags = set->tags[hctx_idx]; 3656 3657 if (set->ops->init_hctx && 3658 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 3659 goto unregister_cpu_notifier; 3660 3661 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, 3662 hctx->numa_node)) 3663 goto exit_hctx; 3664 3665 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) 3666 goto exit_flush_rq; 3667 3668 return 0; 3669 3670 exit_flush_rq: 3671 if (set->ops->exit_request) 3672 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 3673 exit_hctx: 3674 if (set->ops->exit_hctx) 3675 set->ops->exit_hctx(hctx, hctx_idx); 3676 unregister_cpu_notifier: 3677 blk_mq_remove_cpuhp(hctx); 3678 return -1; 3679 } 3680 3681 static struct blk_mq_hw_ctx * 3682 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, 3683 int node) 3684 { 3685 struct blk_mq_hw_ctx *hctx; 3686 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; 3687 3688 hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); 3689 if (!hctx) 3690 goto fail_alloc_hctx; 3691 3692 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) 3693 goto free_hctx; 3694 3695 atomic_set(&hctx->nr_active, 0); 3696 if (node == NUMA_NO_NODE) 3697 node = set->numa_node; 3698 hctx->numa_node = node; 3699 3700 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); 3701 spin_lock_init(&hctx->lock); 3702 INIT_LIST_HEAD(&hctx->dispatch); 3703 hctx->queue = q; 3704 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; 3705 3706 INIT_LIST_HEAD(&hctx->hctx_list); 3707 3708 /* 3709 * Allocate space for all possible cpus to avoid allocation at 3710 * runtime 3711 */ 3712 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), 3713 gfp, node); 3714 if (!hctx->ctxs) 3715 goto free_cpumask; 3716 3717 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), 3718 gfp, node, false, false)) 3719 
goto free_ctxs; 3720 hctx->nr_ctx = 0; 3721 3722 spin_lock_init(&hctx->dispatch_wait_lock); 3723 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); 3724 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); 3725 3726 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); 3727 if (!hctx->fq) 3728 goto free_bitmap; 3729 3730 blk_mq_hctx_kobj_init(hctx); 3731 3732 return hctx; 3733 3734 free_bitmap: 3735 sbitmap_free(&hctx->ctx_map); 3736 free_ctxs: 3737 kfree(hctx->ctxs); 3738 free_cpumask: 3739 free_cpumask_var(hctx->cpumask); 3740 free_hctx: 3741 kfree(hctx); 3742 fail_alloc_hctx: 3743 return NULL; 3744 } 3745 3746 static void blk_mq_init_cpu_queues(struct request_queue *q, 3747 unsigned int nr_hw_queues) 3748 { 3749 struct blk_mq_tag_set *set = q->tag_set; 3750 unsigned int i, j; 3751 3752 for_each_possible_cpu(i) { 3753 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 3754 struct blk_mq_hw_ctx *hctx; 3755 int k; 3756 3757 __ctx->cpu = i; 3758 spin_lock_init(&__ctx->lock); 3759 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) 3760 INIT_LIST_HEAD(&__ctx->rq_lists[k]); 3761 3762 __ctx->queue = q; 3763 3764 /* 3765 * Set local node, IFF we have more than one hw queue. If 3766 * not, we remain on the home node of the device 3767 */ 3768 for (j = 0; j < set->nr_maps; j++) { 3769 hctx = blk_mq_map_queue_type(q, j, i); 3770 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) 3771 hctx->numa_node = cpu_to_node(i); 3772 } 3773 } 3774 } 3775 3776 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3777 unsigned int hctx_idx, 3778 unsigned int depth) 3779 { 3780 struct blk_mq_tags *tags; 3781 int ret; 3782 3783 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); 3784 if (!tags) 3785 return NULL; 3786 3787 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); 3788 if (ret) { 3789 blk_mq_free_rq_map(tags); 3790 return NULL; 3791 } 3792 3793 return tags; 3794 } 3795 3796 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, 3797 int hctx_idx) 3798 { 3799 if (blk_mq_is_shared_tags(set->flags)) { 3800 set->tags[hctx_idx] = set->shared_tags; 3801 3802 return true; 3803 } 3804 3805 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, 3806 set->queue_depth); 3807 3808 return set->tags[hctx_idx]; 3809 } 3810 3811 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3812 struct blk_mq_tags *tags, 3813 unsigned int hctx_idx) 3814 { 3815 if (tags) { 3816 blk_mq_free_rqs(set, tags, hctx_idx); 3817 blk_mq_free_rq_map(tags); 3818 } 3819 } 3820 3821 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, 3822 unsigned int hctx_idx) 3823 { 3824 if (!blk_mq_is_shared_tags(set->flags)) 3825 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); 3826 3827 set->tags[hctx_idx] = NULL; 3828 } 3829 3830 static void blk_mq_map_swqueue(struct request_queue *q) 3831 { 3832 unsigned int j, hctx_idx; 3833 unsigned long i; 3834 struct blk_mq_hw_ctx *hctx; 3835 struct blk_mq_ctx *ctx; 3836 struct blk_mq_tag_set *set = q->tag_set; 3837 3838 queue_for_each_hw_ctx(q, hctx, i) { 3839 cpumask_clear(hctx->cpumask); 3840 hctx->nr_ctx = 0; 3841 hctx->dispatch_from = NULL; 3842 } 3843 3844 /* 3845 * Map software to hardware queues. 3846 * 3847 * If the cpu isn't present, the cpu is mapped to first hctx. 
3848 	 */
3849 	for_each_possible_cpu(i) {
3850 
3851 		ctx = per_cpu_ptr(q->queue_ctx, i);
3852 		for (j = 0; j < set->nr_maps; j++) {
3853 			if (!set->map[j].nr_queues) {
3854 				ctx->hctxs[j] = blk_mq_map_queue_type(q,
3855 						HCTX_TYPE_DEFAULT, i);
3856 				continue;
3857 			}
3858 			hctx_idx = set->map[j].mq_map[i];
3859 			/* an unmapped hw queue can be remapped after the CPU topology changes */
3860 			if (!set->tags[hctx_idx] &&
3861 			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3862 				/*
3863 				 * If tags initialization fails for some hctx,
3864 				 * that hctx won't be brought online. In this
3865 				 * case, remap the current ctx to hctx[0] which
3866 				 * is guaranteed to always have tags allocated
3867 				 */
3868 				set->map[j].mq_map[i] = 0;
3869 			}
3870 
3871 			hctx = blk_mq_map_queue_type(q, j, i);
3872 			ctx->hctxs[j] = hctx;
3873 			/*
3874 			 * If the CPU is already set in the mask, then we've
3875 			 * mapped this one already. This can happen if
3876 			 * devices share queues across queue maps.
3877 			 */
3878 			if (cpumask_test_cpu(i, hctx->cpumask))
3879 				continue;
3880 
3881 			cpumask_set_cpu(i, hctx->cpumask);
3882 			hctx->type = j;
3883 			ctx->index_hw[hctx->type] = hctx->nr_ctx;
3884 			hctx->ctxs[hctx->nr_ctx++] = ctx;
3885 
3886 			/*
3887 			 * If the nr_ctx type overflows, we have exceeded the
3888 			 * number of sw queues we can support.
3889 			 */
3890 			BUG_ON(!hctx->nr_ctx);
3891 		}
3892 
3893 		for (; j < HCTX_MAX_TYPES; j++)
3894 			ctx->hctxs[j] = blk_mq_map_queue_type(q,
3895 					HCTX_TYPE_DEFAULT, i);
3896 	}
3897 
3898 	queue_for_each_hw_ctx(q, hctx, i) {
3899 		/*
3900 		 * If no software queues are mapped to this hardware queue,
3901 		 * disable it and free the request entries.
3902 		 */
3903 		if (!hctx->nr_ctx) {
3904 			/* Never unmap queue 0. We need it as a
3905 			 * fallback in case allocation fails during
3906 			 * a new remap
3907 			 */
3908 			if (i)
3909 				__blk_mq_free_map_and_rqs(set, i);
3910 
3911 			hctx->tags = NULL;
3912 			continue;
3913 		}
3914 
3915 		hctx->tags = set->tags[i];
3916 		WARN_ON(!hctx->tags);
3917 
3918 		/*
3919 		 * Set the map size to the number of mapped software queues.
3920 		 * This is more accurate and more efficient than looping
3921 		 * over all possibly mapped software queues.
3922 		 */
3923 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3924 
3925 		/*
3926 		 * Initialize batch round-robin counts
3927 		 */
3928 		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3929 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3930 	}
3931 }
3932 
3933 /*
3934  * Caller needs to ensure that we're either frozen/quiesced, or that
3935  * the queue isn't live yet.
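 *
 * Typical caller pattern (illustrative, mirroring
 * blk_mq_update_tag_set_shared() below):
 *
 *	blk_mq_freeze_queue(q);
 *	queue_set_hctx_shared(q, shared);
 *	blk_mq_unfreeze_queue(q);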
3936 */ 3937 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 3938 { 3939 struct blk_mq_hw_ctx *hctx; 3940 unsigned long i; 3941 3942 queue_for_each_hw_ctx(q, hctx, i) { 3943 if (shared) { 3944 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3945 } else { 3946 blk_mq_tag_idle(hctx); 3947 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3948 } 3949 } 3950 } 3951 3952 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, 3953 bool shared) 3954 { 3955 struct request_queue *q; 3956 3957 lockdep_assert_held(&set->tag_list_lock); 3958 3959 list_for_each_entry(q, &set->tag_list, tag_set_list) { 3960 blk_mq_freeze_queue(q); 3961 queue_set_hctx_shared(q, shared); 3962 blk_mq_unfreeze_queue(q); 3963 } 3964 } 3965 3966 static void blk_mq_del_queue_tag_set(struct request_queue *q) 3967 { 3968 struct blk_mq_tag_set *set = q->tag_set; 3969 3970 mutex_lock(&set->tag_list_lock); 3971 list_del(&q->tag_set_list); 3972 if (list_is_singular(&set->tag_list)) { 3973 /* just transitioned to unshared */ 3974 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; 3975 /* update existing queue */ 3976 blk_mq_update_tag_set_shared(set, false); 3977 } 3978 mutex_unlock(&set->tag_list_lock); 3979 INIT_LIST_HEAD(&q->tag_set_list); 3980 } 3981 3982 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, 3983 struct request_queue *q) 3984 { 3985 mutex_lock(&set->tag_list_lock); 3986 3987 /* 3988 * Check to see if we're transitioning to shared (from 1 to 2 queues). 3989 */ 3990 if (!list_empty(&set->tag_list) && 3991 !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { 3992 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; 3993 /* update existing queue */ 3994 blk_mq_update_tag_set_shared(set, true); 3995 } 3996 if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 3997 queue_set_hctx_shared(q, true); 3998 list_add_tail(&q->tag_set_list, &set->tag_list); 3999 4000 mutex_unlock(&set->tag_list_lock); 4001 } 4002 4003 /* All allocations will be freed in release handler of q->mq_kobj */ 4004 static int blk_mq_alloc_ctxs(struct request_queue *q) 4005 { 4006 struct blk_mq_ctxs *ctxs; 4007 int cpu; 4008 4009 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); 4010 if (!ctxs) 4011 return -ENOMEM; 4012 4013 ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); 4014 if (!ctxs->queue_ctx) 4015 goto fail; 4016 4017 for_each_possible_cpu(cpu) { 4018 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); 4019 ctx->ctxs = ctxs; 4020 } 4021 4022 q->mq_kobj = &ctxs->kobj; 4023 q->queue_ctx = ctxs->queue_ctx; 4024 4025 return 0; 4026 fail: 4027 kfree(ctxs); 4028 return -ENOMEM; 4029 } 4030 4031 /* 4032 * It is the actual release handler for mq, but we do it from 4033 * request queue's release handler for avoiding use-after-free 4034 * and headache because q->mq_kobj shouldn't have been introduced, 4035 * but we can't group ctx/kctx kobj without it. 4036 */ 4037 void blk_mq_release(struct request_queue *q) 4038 { 4039 struct blk_mq_hw_ctx *hctx, *next; 4040 unsigned long i; 4041 4042 queue_for_each_hw_ctx(q, hctx, i) 4043 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); 4044 4045 /* all hctx are in .unused_hctx_list now */ 4046 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { 4047 list_del_init(&hctx->hctx_list); 4048 kobject_put(&hctx->kobj); 4049 } 4050 4051 xa_destroy(&q->hctx_table); 4052 4053 /* 4054 * release .mq_kobj and sw queue's kobject now because 4055 * both share lifetime with request queue. 
4056 */ 4057 blk_mq_sysfs_deinit(q); 4058 } 4059 4060 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, 4061 void *queuedata) 4062 { 4063 struct request_queue *q; 4064 int ret; 4065 4066 q = blk_alloc_queue(set->numa_node); 4067 if (!q) 4068 return ERR_PTR(-ENOMEM); 4069 q->queuedata = queuedata; 4070 ret = blk_mq_init_allocated_queue(set, q); 4071 if (ret) { 4072 blk_put_queue(q); 4073 return ERR_PTR(ret); 4074 } 4075 return q; 4076 } 4077 4078 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 4079 { 4080 return blk_mq_init_queue_data(set, NULL); 4081 } 4082 EXPORT_SYMBOL(blk_mq_init_queue); 4083 4084 /** 4085 * blk_mq_destroy_queue - shutdown a request queue 4086 * @q: request queue to shutdown 4087 * 4088 * This shuts down a request queue allocated by blk_mq_init_queue(). All future 4089 * requests will be failed with -ENODEV. The caller is responsible for dropping 4090 * the reference from blk_mq_init_queue() by calling blk_put_queue(). 4091 * 4092 * Context: can sleep 4093 */ 4094 void blk_mq_destroy_queue(struct request_queue *q) 4095 { 4096 WARN_ON_ONCE(!queue_is_mq(q)); 4097 WARN_ON_ONCE(blk_queue_registered(q)); 4098 4099 might_sleep(); 4100 4101 blk_queue_flag_set(QUEUE_FLAG_DYING, q); 4102 blk_queue_start_drain(q); 4103 blk_mq_freeze_queue_wait(q); 4104 4105 blk_sync_queue(q); 4106 blk_mq_cancel_work_sync(q); 4107 blk_mq_exit_queue(q); 4108 } 4109 EXPORT_SYMBOL(blk_mq_destroy_queue); 4110 4111 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, 4112 struct lock_class_key *lkclass) 4113 { 4114 struct request_queue *q; 4115 struct gendisk *disk; 4116 4117 q = blk_mq_init_queue_data(set, queuedata); 4118 if (IS_ERR(q)) 4119 return ERR_CAST(q); 4120 4121 disk = __alloc_disk_node(q, set->numa_node, lkclass); 4122 if (!disk) { 4123 blk_mq_destroy_queue(q); 4124 blk_put_queue(q); 4125 return ERR_PTR(-ENOMEM); 4126 } 4127 set_bit(GD_OWNS_QUEUE, &disk->state); 4128 return disk; 4129 } 4130 EXPORT_SYMBOL(__blk_mq_alloc_disk); 4131 4132 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, 4133 struct lock_class_key *lkclass) 4134 { 4135 struct gendisk *disk; 4136 4137 if (!blk_get_queue(q)) 4138 return NULL; 4139 disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass); 4140 if (!disk) 4141 blk_put_queue(q); 4142 return disk; 4143 } 4144 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); 4145 4146 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( 4147 struct blk_mq_tag_set *set, struct request_queue *q, 4148 int hctx_idx, int node) 4149 { 4150 struct blk_mq_hw_ctx *hctx = NULL, *tmp; 4151 4152 /* reuse dead hctx first */ 4153 spin_lock(&q->unused_hctx_lock); 4154 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { 4155 if (tmp->numa_node == node) { 4156 hctx = tmp; 4157 break; 4158 } 4159 } 4160 if (hctx) 4161 list_del_init(&hctx->hctx_list); 4162 spin_unlock(&q->unused_hctx_lock); 4163 4164 if (!hctx) 4165 hctx = blk_mq_alloc_hctx(q, set, node); 4166 if (!hctx) 4167 goto fail; 4168 4169 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) 4170 goto free_hctx; 4171 4172 return hctx; 4173 4174 free_hctx: 4175 kobject_put(&hctx->kobj); 4176 fail: 4177 return NULL; 4178 } 4179 4180 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, 4181 struct request_queue *q) 4182 { 4183 struct blk_mq_hw_ctx *hctx; 4184 unsigned long i, j; 4185 4186 /* protect against switching io scheduler */ 4187 mutex_lock(&q->sysfs_lock); 4188 for (i = 0; i < set->nr_hw_queues; i++) { 4189 int old_node; 4190 int node = 
blk_mq_get_hctx_node(set, i);
4191 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4192
4193 if (old_hctx) {
4194 old_node = old_hctx->numa_node;
4195 blk_mq_exit_hctx(q, set, old_hctx, i);
4196 }
4197
4198 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4199 if (!old_hctx)
4200 break;
4201 pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
4202 node, old_node);
4203 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4204 WARN_ON_ONCE(!hctx);
4205 }
4206 }
4207 /*
4208 * If increasing nr_hw_queues failed, free the newly allocated
4209 * hctxs and keep the previous q->nr_hw_queues.
4210 */
4211 if (i != set->nr_hw_queues) {
4212 j = q->nr_hw_queues;
4213 } else {
4214 j = i;
4215 q->nr_hw_queues = set->nr_hw_queues;
4216 }
4217
4218 xa_for_each_start(&q->hctx_table, j, hctx, j)
4219 blk_mq_exit_hctx(q, set, hctx, j);
4220 mutex_unlock(&q->sysfs_lock);
4221 }
4222
4223 static void blk_mq_update_poll_flag(struct request_queue *q)
4224 {
4225 struct blk_mq_tag_set *set = q->tag_set;
4226
4227 if (set->nr_maps > HCTX_TYPE_POLL &&
4228 set->map[HCTX_TYPE_POLL].nr_queues)
4229 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4230 else
4231 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4232 }
4233
4234 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4235 struct request_queue *q)
4236 {
4237 /* mark the queue as mq asap */
4238 q->mq_ops = set->ops;
4239
4240 if (blk_mq_alloc_ctxs(q))
4241 goto err_exit;
4242
4243 /* init q->mq_kobj and sw queues' kobjects */
4244 blk_mq_sysfs_init(q);
4245
4246 INIT_LIST_HEAD(&q->unused_hctx_list);
4247 spin_lock_init(&q->unused_hctx_lock);
4248
4249 xa_init(&q->hctx_table);
4250
4251 blk_mq_realloc_hw_ctxs(set, q);
4252 if (!q->nr_hw_queues)
4253 goto err_hctxs;
4254
4255 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4256 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4257
4258 q->tag_set = set;
4259
4260 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4261 blk_mq_update_poll_flag(q);
4262
4263 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4264 INIT_LIST_HEAD(&q->flush_list);
4265 INIT_LIST_HEAD(&q->requeue_list);
4266 spin_lock_init(&q->requeue_lock);
4267
4268 q->nr_requests = set->queue_depth;
4269
4270 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4271 blk_mq_add_queue_tag_set(set, q);
4272 blk_mq_map_swqueue(q);
4273 return 0;
4274
4275 err_hctxs:
4276 blk_mq_release(q);
4277 err_exit:
4278 q->mq_ops = NULL;
4279 return -ENOMEM;
4280 }
4281 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4282
4283 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4284 void blk_mq_exit_queue(struct request_queue *q)
4285 {
4286 struct blk_mq_tag_set *set = q->tag_set;
4287
4288 /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4289 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4290 /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags.
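* That is why blk_mq_exit_hw_queues() runs first, while the flag
* still reflects the tag set's shared state.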
*/
4291 blk_mq_del_queue_tag_set(q);
4292 }
4293
4294 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4295 {
4296 int i;
4297
4298 if (blk_mq_is_shared_tags(set->flags)) {
4299 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4300 BLK_MQ_NO_HCTX_IDX,
4301 set->queue_depth);
4302 if (!set->shared_tags)
4303 return -ENOMEM;
4304 }
4305
4306 for (i = 0; i < set->nr_hw_queues; i++) {
4307 if (!__blk_mq_alloc_map_and_rqs(set, i))
4308 goto out_unwind;
4309 cond_resched();
4310 }
4311
4312 return 0;
4313
4314 out_unwind:
4315 while (--i >= 0)
4316 __blk_mq_free_map_and_rqs(set, i);
4317
4318 if (blk_mq_is_shared_tags(set->flags)) {
4319 blk_mq_free_map_and_rqs(set, set->shared_tags,
4320 BLK_MQ_NO_HCTX_IDX);
4321 }
4322
4323 return -ENOMEM;
4324 }
4325
4326 /*
4327 * Allocate the request maps associated with this tag_set. Note that this
4328 * may reduce the depth asked for, if memory is tight. set->queue_depth
4329 * will be updated to reflect the allocated depth.
4330 */
4331 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4332 {
4333 unsigned int depth;
4334 int err;
4335
4336 depth = set->queue_depth;
4337 do {
4338 err = __blk_mq_alloc_rq_maps(set);
4339 if (!err)
4340 break;
4341
4342 set->queue_depth >>= 1;
4343 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4344 err = -ENOMEM;
4345 break;
4346 }
4347 } while (set->queue_depth);
4348
4349 if (!set->queue_depth || err) {
4350 pr_err("blk-mq: failed to allocate request map\n");
4351 return -ENOMEM;
4352 }
4353
4354 if (depth != set->queue_depth)
4355 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4356 depth, set->queue_depth);
4357
4358 return 0;
4359 }
4360
4361 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4362 {
4363 /*
4364 * blk_mq_map_queues() and multiple .map_queues() implementations
4365 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4366 * number of hardware queues.
4367 */
4368 if (set->nr_maps == 1)
4369 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4370
4371 if (set->ops->map_queues && !is_kdump_kernel()) {
4372 int i;
4373
4374 /*
4375 * A transport's .map_queues implementation usually looks
4376 * like the following:
4377 *
4378 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4379 * mask = get_cpu_mask(queue)
4380 * for_each_cpu(cpu, mask)
4381 * set->map[x].mq_map[cpu] = queue;
4382 * }
4383 *
4384 * When we need to remap, the table has to be cleared to
4385 * kill stale mappings, since a CPU may not end up mapped
4386 * to any hw queue.
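*
* blk_mq_clear_mq_map() resets every mq_map[cpu] entry to 0, so a CPU
* that the driver's ->map_queues() leaves untouched simply falls back
* to hctx 0 rather than keeping a stale mapping.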
4387 */ 4388 for (i = 0; i < set->nr_maps; i++) 4389 blk_mq_clear_mq_map(&set->map[i]); 4390 4391 set->ops->map_queues(set); 4392 } else { 4393 BUG_ON(set->nr_maps > 1); 4394 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); 4395 } 4396 } 4397 4398 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, 4399 int new_nr_hw_queues) 4400 { 4401 struct blk_mq_tags **new_tags; 4402 int i; 4403 4404 if (set->nr_hw_queues >= new_nr_hw_queues) 4405 goto done; 4406 4407 new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), 4408 GFP_KERNEL, set->numa_node); 4409 if (!new_tags) 4410 return -ENOMEM; 4411 4412 if (set->tags) 4413 memcpy(new_tags, set->tags, set->nr_hw_queues * 4414 sizeof(*set->tags)); 4415 kfree(set->tags); 4416 set->tags = new_tags; 4417 4418 for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) { 4419 if (!__blk_mq_alloc_map_and_rqs(set, i)) { 4420 while (--i >= set->nr_hw_queues) 4421 __blk_mq_free_map_and_rqs(set, i); 4422 return -ENOMEM; 4423 } 4424 cond_resched(); 4425 } 4426 4427 done: 4428 set->nr_hw_queues = new_nr_hw_queues; 4429 return 0; 4430 } 4431 4432 /* 4433 * Alloc a tag set to be associated with one or more request queues. 4434 * May fail with EINVAL for various error conditions. May adjust the 4435 * requested depth down, if it's too large. In that case, the set 4436 * value will be stored in set->queue_depth. 4437 */ 4438 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 4439 { 4440 int i, ret; 4441 4442 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 4443 4444 if (!set->nr_hw_queues) 4445 return -EINVAL; 4446 if (!set->queue_depth) 4447 return -EINVAL; 4448 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 4449 return -EINVAL; 4450 4451 if (!set->ops->queue_rq) 4452 return -EINVAL; 4453 4454 if (!set->ops->get_budget ^ !set->ops->put_budget) 4455 return -EINVAL; 4456 4457 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { 4458 pr_info("blk-mq: reduced tag depth to %u\n", 4459 BLK_MQ_MAX_DEPTH); 4460 set->queue_depth = BLK_MQ_MAX_DEPTH; 4461 } 4462 4463 if (!set->nr_maps) 4464 set->nr_maps = 1; 4465 else if (set->nr_maps > HCTX_MAX_TYPES) 4466 return -EINVAL; 4467 4468 /* 4469 * If a crashdump is active, then we are potentially in a very 4470 * memory constrained environment. Limit us to 1 queue and 4471 * 64 tags to prevent using too much memory. 4472 */ 4473 if (is_kdump_kernel()) { 4474 set->nr_hw_queues = 1; 4475 set->nr_maps = 1; 4476 set->queue_depth = min(64U, set->queue_depth); 4477 } 4478 /* 4479 * There is no use for more h/w queues than cpus if we just have 4480 * a single map 4481 */ 4482 if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) 4483 set->nr_hw_queues = nr_cpu_ids; 4484 4485 if (set->flags & BLK_MQ_F_BLOCKING) { 4486 set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL); 4487 if (!set->srcu) 4488 return -ENOMEM; 4489 ret = init_srcu_struct(set->srcu); 4490 if (ret) 4491 goto out_free_srcu; 4492 } 4493 4494 ret = -ENOMEM; 4495 set->tags = kcalloc_node(set->nr_hw_queues, 4496 sizeof(struct blk_mq_tags *), GFP_KERNEL, 4497 set->numa_node); 4498 if (!set->tags) 4499 goto out_cleanup_srcu; 4500 4501 for (i = 0; i < set->nr_maps; i++) { 4502 set->map[i].mq_map = kcalloc_node(nr_cpu_ids, 4503 sizeof(set->map[i].mq_map[0]), 4504 GFP_KERNEL, set->numa_node); 4505 if (!set->map[i].mq_map) 4506 goto out_free_mq_map; 4507 set->map[i].nr_queues = is_kdump_kernel() ? 
1 : set->nr_hw_queues; 4508 } 4509 4510 blk_mq_update_queue_map(set); 4511 4512 ret = blk_mq_alloc_set_map_and_rqs(set); 4513 if (ret) 4514 goto out_free_mq_map; 4515 4516 mutex_init(&set->tag_list_lock); 4517 INIT_LIST_HEAD(&set->tag_list); 4518 4519 return 0; 4520 4521 out_free_mq_map: 4522 for (i = 0; i < set->nr_maps; i++) { 4523 kfree(set->map[i].mq_map); 4524 set->map[i].mq_map = NULL; 4525 } 4526 kfree(set->tags); 4527 set->tags = NULL; 4528 out_cleanup_srcu: 4529 if (set->flags & BLK_MQ_F_BLOCKING) 4530 cleanup_srcu_struct(set->srcu); 4531 out_free_srcu: 4532 if (set->flags & BLK_MQ_F_BLOCKING) 4533 kfree(set->srcu); 4534 return ret; 4535 } 4536 EXPORT_SYMBOL(blk_mq_alloc_tag_set); 4537 4538 /* allocate and initialize a tagset for a simple single-queue device */ 4539 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, 4540 const struct blk_mq_ops *ops, unsigned int queue_depth, 4541 unsigned int set_flags) 4542 { 4543 memset(set, 0, sizeof(*set)); 4544 set->ops = ops; 4545 set->nr_hw_queues = 1; 4546 set->nr_maps = 1; 4547 set->queue_depth = queue_depth; 4548 set->numa_node = NUMA_NO_NODE; 4549 set->flags = set_flags; 4550 return blk_mq_alloc_tag_set(set); 4551 } 4552 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); 4553 4554 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) 4555 { 4556 int i, j; 4557 4558 for (i = 0; i < set->nr_hw_queues; i++) 4559 __blk_mq_free_map_and_rqs(set, i); 4560 4561 if (blk_mq_is_shared_tags(set->flags)) { 4562 blk_mq_free_map_and_rqs(set, set->shared_tags, 4563 BLK_MQ_NO_HCTX_IDX); 4564 } 4565 4566 for (j = 0; j < set->nr_maps; j++) { 4567 kfree(set->map[j].mq_map); 4568 set->map[j].mq_map = NULL; 4569 } 4570 4571 kfree(set->tags); 4572 set->tags = NULL; 4573 if (set->flags & BLK_MQ_F_BLOCKING) { 4574 cleanup_srcu_struct(set->srcu); 4575 kfree(set->srcu); 4576 } 4577 } 4578 EXPORT_SYMBOL(blk_mq_free_tag_set); 4579 4580 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) 4581 { 4582 struct blk_mq_tag_set *set = q->tag_set; 4583 struct blk_mq_hw_ctx *hctx; 4584 int ret; 4585 unsigned long i; 4586 4587 if (!set) 4588 return -EINVAL; 4589 4590 if (q->nr_requests == nr) 4591 return 0; 4592 4593 blk_mq_freeze_queue(q); 4594 blk_mq_quiesce_queue(q); 4595 4596 ret = 0; 4597 queue_for_each_hw_ctx(q, hctx, i) { 4598 if (!hctx->tags) 4599 continue; 4600 /* 4601 * If we're using an MQ scheduler, just update the scheduler 4602 * queue depth. This is similar to what the old code would do. 4603 */ 4604 if (hctx->sched_tags) { 4605 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, 4606 nr, true); 4607 } else { 4608 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, 4609 false); 4610 } 4611 if (ret) 4612 break; 4613 if (q->elevator && q->elevator->type->ops.depth_updated) 4614 q->elevator->type->ops.depth_updated(hctx); 4615 } 4616 if (!ret) { 4617 q->nr_requests = nr; 4618 if (blk_mq_is_shared_tags(set->flags)) { 4619 if (q->elevator) 4620 blk_mq_tag_update_sched_shared_tags(q); 4621 else 4622 blk_mq_tag_resize_shared_tags(set, nr); 4623 } 4624 } 4625 4626 blk_mq_unquiesce_queue(q); 4627 blk_mq_unfreeze_queue(q); 4628 4629 return ret; 4630 } 4631 4632 /* 4633 * request_queue and elevator_type pair. 4634 * It is just used by __blk_mq_update_nr_hw_queues to cache 4635 * the elevator_type associated with a request_queue. 
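*
* __blk_mq_update_nr_hw_queues() keeps one such pair per queue on a
* local list while the queues temporarily run without an elevator.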
4636 */ 4637 struct blk_mq_qe_pair { 4638 struct list_head node; 4639 struct request_queue *q; 4640 struct elevator_type *type; 4641 }; 4642 4643 /* 4644 * Cache the elevator_type in qe pair list and switch the 4645 * io scheduler to 'none' 4646 */ 4647 static bool blk_mq_elv_switch_none(struct list_head *head, 4648 struct request_queue *q) 4649 { 4650 struct blk_mq_qe_pair *qe; 4651 4652 qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); 4653 if (!qe) 4654 return false; 4655 4656 /* q->elevator needs protection from ->sysfs_lock */ 4657 mutex_lock(&q->sysfs_lock); 4658 4659 /* the check has to be done with holding sysfs_lock */ 4660 if (!q->elevator) { 4661 kfree(qe); 4662 goto unlock; 4663 } 4664 4665 INIT_LIST_HEAD(&qe->node); 4666 qe->q = q; 4667 qe->type = q->elevator->type; 4668 /* keep a reference to the elevator module as we'll switch back */ 4669 __elevator_get(qe->type); 4670 list_add(&qe->node, head); 4671 elevator_disable(q); 4672 unlock: 4673 mutex_unlock(&q->sysfs_lock); 4674 4675 return true; 4676 } 4677 4678 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head, 4679 struct request_queue *q) 4680 { 4681 struct blk_mq_qe_pair *qe; 4682 4683 list_for_each_entry(qe, head, node) 4684 if (qe->q == q) 4685 return qe; 4686 4687 return NULL; 4688 } 4689 4690 static void blk_mq_elv_switch_back(struct list_head *head, 4691 struct request_queue *q) 4692 { 4693 struct blk_mq_qe_pair *qe; 4694 struct elevator_type *t; 4695 4696 qe = blk_lookup_qe_pair(head, q); 4697 if (!qe) 4698 return; 4699 t = qe->type; 4700 list_del(&qe->node); 4701 kfree(qe); 4702 4703 mutex_lock(&q->sysfs_lock); 4704 elevator_switch(q, t); 4705 /* drop the reference acquired in blk_mq_elv_switch_none */ 4706 elevator_put(t); 4707 mutex_unlock(&q->sysfs_lock); 4708 } 4709 4710 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 4711 int nr_hw_queues) 4712 { 4713 struct request_queue *q; 4714 LIST_HEAD(head); 4715 int prev_nr_hw_queues = set->nr_hw_queues; 4716 int i; 4717 4718 lockdep_assert_held(&set->tag_list_lock); 4719 4720 if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) 4721 nr_hw_queues = nr_cpu_ids; 4722 if (nr_hw_queues < 1) 4723 return; 4724 if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) 4725 return; 4726 4727 list_for_each_entry(q, &set->tag_list, tag_set_list) 4728 blk_mq_freeze_queue(q); 4729 /* 4730 * Switch IO scheduler to 'none', cleaning up the data associated 4731 * with the previous scheduler. We will switch back once we are done 4732 * updating the new sw to hw queue mappings. 
4733 */ 4734 list_for_each_entry(q, &set->tag_list, tag_set_list) 4735 if (!blk_mq_elv_switch_none(&head, q)) 4736 goto switch_back; 4737 4738 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4739 blk_mq_debugfs_unregister_hctxs(q); 4740 blk_mq_sysfs_unregister_hctxs(q); 4741 } 4742 4743 if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) 4744 goto reregister; 4745 4746 fallback: 4747 blk_mq_update_queue_map(set); 4748 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4749 blk_mq_realloc_hw_ctxs(set, q); 4750 blk_mq_update_poll_flag(q); 4751 if (q->nr_hw_queues != set->nr_hw_queues) { 4752 int i = prev_nr_hw_queues; 4753 4754 pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", 4755 nr_hw_queues, prev_nr_hw_queues); 4756 for (; i < set->nr_hw_queues; i++) 4757 __blk_mq_free_map_and_rqs(set, i); 4758 4759 set->nr_hw_queues = prev_nr_hw_queues; 4760 goto fallback; 4761 } 4762 blk_mq_map_swqueue(q); 4763 } 4764 4765 reregister: 4766 list_for_each_entry(q, &set->tag_list, tag_set_list) { 4767 blk_mq_sysfs_register_hctxs(q); 4768 blk_mq_debugfs_register_hctxs(q); 4769 } 4770 4771 switch_back: 4772 list_for_each_entry(q, &set->tag_list, tag_set_list) 4773 blk_mq_elv_switch_back(&head, q); 4774 4775 list_for_each_entry(q, &set->tag_list, tag_set_list) 4776 blk_mq_unfreeze_queue(q); 4777 4778 /* Free the excess tags when nr_hw_queues shrink. */ 4779 for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++) 4780 __blk_mq_free_map_and_rqs(set, i); 4781 } 4782 4783 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 4784 { 4785 mutex_lock(&set->tag_list_lock); 4786 __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 4787 mutex_unlock(&set->tag_list_lock); 4788 } 4789 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 4790 4791 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, 4792 struct io_comp_batch *iob, unsigned int flags) 4793 { 4794 long state = get_current_state(); 4795 int ret; 4796 4797 do { 4798 ret = q->mq_ops->poll(hctx, iob); 4799 if (ret > 0) { 4800 __set_current_state(TASK_RUNNING); 4801 return ret; 4802 } 4803 4804 if (signal_pending_state(state, current)) 4805 __set_current_state(TASK_RUNNING); 4806 if (task_is_running(current)) 4807 return 1; 4808 4809 if (ret < 0 || (flags & BLK_POLL_ONESHOT)) 4810 break; 4811 cpu_relax(); 4812 } while (!need_resched()); 4813 4814 __set_current_state(TASK_RUNNING); 4815 return 0; 4816 } 4817 4818 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, 4819 struct io_comp_batch *iob, unsigned int flags) 4820 { 4821 struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie); 4822 4823 return blk_hctx_poll(q, hctx, iob, flags); 4824 } 4825 4826 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob, 4827 unsigned int poll_flags) 4828 { 4829 struct request_queue *q = rq->q; 4830 int ret; 4831 4832 if (!blk_rq_is_poll(rq)) 4833 return 0; 4834 if (!percpu_ref_tryget(&q->q_usage_counter)) 4835 return 0; 4836 4837 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags); 4838 blk_queue_exit(q); 4839 4840 return ret; 4841 } 4842 EXPORT_SYMBOL_GPL(blk_rq_poll); 4843 4844 unsigned int blk_mq_rq_cpu(struct request *rq) 4845 { 4846 return rq->mq_ctx->cpu; 4847 } 4848 EXPORT_SYMBOL(blk_mq_rq_cpu); 4849 4850 void blk_mq_cancel_work_sync(struct request_queue *q) 4851 { 4852 struct blk_mq_hw_ctx *hctx; 4853 unsigned long i; 4854 4855 cancel_delayed_work_sync(&q->requeue_work); 4856 4857 queue_for_each_hw_ctx(q, hctx, i) 4858 cancel_delayed_work_sync(&hctx->run_work); 4859 } 4860 4861 
static int __init blk_mq_init(void) 4862 { 4863 int i; 4864 4865 for_each_possible_cpu(i) 4866 init_llist_head(&per_cpu(blk_cpu_done, i)); 4867 for_each_possible_cpu(i) 4868 INIT_CSD(&per_cpu(blk_cpu_csd, i), 4869 __blk_mq_complete_request_remote, NULL); 4870 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); 4871 4872 cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, 4873 "block/softirq:dead", NULL, 4874 blk_softirq_cpu_dead); 4875 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, 4876 blk_mq_hctx_notify_dead); 4877 cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", 4878 blk_mq_hctx_notify_online, 4879 blk_mq_hctx_notify_offline); 4880 return 0; 4881 } 4882 subsys_initcall(blk_mq_init); 4883
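/*
* Usage sketch for the tag set API (illustrative only; the sblk_* names
* are hypothetical and not part of this file). A simple single-queue
* driver pairs blk_mq_alloc_sq_tag_set() with blk_mq_free_tag_set(),
* and may hang a queue off the set with blk_mq_init_queue():
*
*	static const struct blk_mq_ops sblk_mq_ops = {
*		.queue_rq	= sblk_queue_rq,
*	};
*
*	err = blk_mq_alloc_sq_tag_set(&sblk_tag_set, &sblk_mq_ops, 128, 0);
*	if (err)
*		return err;
*	q = blk_mq_init_queue(&sblk_tag_set);
*	if (IS_ERR(q)) {
*		blk_mq_free_tag_set(&sblk_tag_set);
*		return PTR_ERR(q);
*	}
*	...
*	blk_mq_free_tag_set(&sblk_tag_set);
*/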