blk-mq.c: old revision 2af8cbe30531eca73c8f3ba277f155fc0020b01a vs. new revision bd166ef183c263c5ced656d49ef19c7da4adc774 (hunks from the two revisions alternate below)
1/*
2 * Block multiqueue core code
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 * Copyright (C) 2013-2014 Christoph Hellwig
6 */
7#include <linux/kernel.h>
8#include <linux/module.h>

--- 18 unchanged lines hidden (view full) ---

27#include <trace/events/block.h>
28
29#include <linux/blk-mq.h>
30#include "blk.h"
31#include "blk-mq.h"
32#include "blk-mq-tag.h"
33#include "blk-stat.h"
34#include "blk-wbt.h"
35#include "blk-mq-sched.h"
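The only change in this hunk is the new include: blk-mq-sched.h is the interface of the multiqueue I/O scheduler framework that the rest of this diff wires into blk-mq.c. For orientation, the most important helpers this file starts calling are sketched below; the prototypes are inferred from the call sites visible in this diff, so parameter names and exact types are assumptions and the authoritative declarations live in block/blk-mq-sched.h.

	/* scheduler-aware allocation, completion and teardown (prototypes inferred, see above) */
	struct request *blk_mq_sched_get_request(struct request_queue *q,
						 struct bio *bio, unsigned int op,
						 struct blk_mq_alloc_data *data);
	void blk_mq_sched_put_request(struct request *rq);
	void blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq);
	void blk_mq_sched_teardown(struct request_queue *q);

	/* insertion and dispatch through the elevator, when one is attached */
	void blk_mq_sched_insert_request(struct request *rq, bool at_head,
					 bool run_queue, bool async);
	void blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
					  struct list_head *list, bool from_schedule);
	void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
	bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx);

	/* merge hooks and the dispatch-restart handshake */
	bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
	bool blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
				      struct bio *bio);
	void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx);
	bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx);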
35
36static DEFINE_MUTEX(all_q_mutex);
37static LIST_HEAD(all_q_list);
38
39/*
40 * Check if any of the ctx's have pending work in this hardware queue
41 */
42static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
43{
36
37static DEFINE_MUTEX(all_q_mutex);
38static LIST_HEAD(all_q_list);
39
40/*
41 * Check if any of the ctx's have pending work in this hardware queue
42 */
43static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
44{
44 return sbitmap_any_bit_set(&hctx->ctx_map);
45 return sbitmap_any_bit_set(&hctx->ctx_map) ||
46 !list_empty_careful(&hctx->dispatch) ||
47 blk_mq_sched_has_work(hctx);
45}
46
47/*
48 * Mark this ctx as having pending work in this hardware queue
49 */
50static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
51 struct blk_mq_ctx *ctx)
52{

--- 165 unchanged lines hidden (view full) ---

218struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
219 unsigned int op)
220{
221 struct request *rq;
222 unsigned int tag;
223
224 tag = blk_mq_get_tag(data);
225 if (tag != BLK_MQ_TAG_FAIL) {
48}
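Note how blk_mq_hctx_has_pending() now reports work from three sources rather than one: the per-ctx software-queue bitmap, the hctx dispatch list, and the attached scheduler, if any. Callers can therefore gate on a single predicate where they previously had to check the dispatch list themselves; the blk_mq_run_hw_queues() hunk further down drops its separate list_empty_careful(&hctx->dispatch) test for exactly this reason, ending up with the shape sketched here:

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!blk_mq_hctx_has_pending(hctx) || blk_mq_hctx_stopped(hctx))
			continue;
		blk_mq_run_hw_queue(hctx, async);
	}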
49
50/*
51 * Mark this ctx as having pending work in this hardware queue
52 */
53static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
54 struct blk_mq_ctx *ctx)
55{

--- 165 unchanged lines hidden (view full) ---

221struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
222 unsigned int op)
223{
224 struct request *rq;
225 unsigned int tag;
226
227 tag = blk_mq_get_tag(data);
228 if (tag != BLK_MQ_TAG_FAIL) {
226 rq = data->hctx->tags->static_rqs[tag];
229 struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
227
230
231 rq = tags->static_rqs[tag];
232
228 if (blk_mq_tag_busy(data->hctx)) {
229 rq->rq_flags = RQF_MQ_INFLIGHT;
230 atomic_inc(&data->hctx->nr_active);
231 }
232
233 if (blk_mq_tag_busy(data->hctx)) {
234 rq->rq_flags = RQF_MQ_INFLIGHT;
235 atomic_inc(&data->hctx->nr_active);
236 }
237
233 rq->tag = tag;
234 data->hctx->tags->rqs[tag] = rq;
238 if (data->flags & BLK_MQ_REQ_INTERNAL) {
239 rq->tag = -1;
240 rq->internal_tag = tag;
241 } else {
242 rq->tag = tag;
243 rq->internal_tag = -1;
244 }
245
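This is the heart of the two-tag scheme the series introduces. A request allocated on behalf of a scheduler (BLK_MQ_REQ_INTERNAL) draws its tag from the scheduler's own tag set and stores it in rq->internal_tag, while rq->tag stays -1 until a driver tag is taken at dispatch time by blk_mq_get_driver_tag() further down; a request allocated without a scheduler behaves as before, with the allocated tag being the driver tag. Note also that the tags->rqs[] back-pointer is no longer filled in here: it is set when the driver tag is actually acquired. A toy sketch of the invariant, using a simplified struct rather than the kernel's struct request:

	struct toy_rq {
		int tag;		/* driver tag; -1 until dispatch */
		int internal_tag;	/* scheduler tag; -1 when no scheduler is involved */
	};

	static void toy_assign_tag(struct toy_rq *rq, unsigned int tag, bool internal)
	{
		if (internal) {
			rq->tag = -1;
			rq->internal_tag = tag;
		} else {
			rq->tag = tag;
			rq->internal_tag = -1;
		}
	}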
235 blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
236 return rq;
237 }
238
239 return NULL;
240}
241EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
242
243struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
244 unsigned int flags)
245{
246 blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
247 return rq;
248 }
249
250 return NULL;
251}
252EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
253
254struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
255 unsigned int flags)
256{
246 struct blk_mq_ctx *ctx;
247 struct blk_mq_hw_ctx *hctx;
248 struct request *rq;
249 struct blk_mq_alloc_data alloc_data;
257 struct blk_mq_alloc_data alloc_data;
258 struct request *rq;
250 int ret;
251
252 ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
253 if (ret)
254 return ERR_PTR(ret);
255
259 int ret;
260
261 ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
262 if (ret)
263 return ERR_PTR(ret);
264
256 ctx = blk_mq_get_ctx(q);
257 hctx = blk_mq_map_queue(q, ctx->cpu);
258 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
259 rq = __blk_mq_alloc_request(&alloc_data, rw);
260 blk_mq_put_ctx(ctx);
265 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
261
266
262 if (!rq) {
263 blk_queue_exit(q);
267 blk_mq_put_ctx(alloc_data.ctx);
268 blk_queue_exit(q);
269
270 if (!rq)
264 return ERR_PTR(-EWOULDBLOCK);
271 return ERR_PTR(-EWOULDBLOCK);
265 }
266
267 rq->__data_len = 0;
268 rq->__sector = (sector_t) -1;
269 rq->bio = rq->biotail = NULL;
270 return rq;
271}
272EXPORT_SYMBOL(blk_mq_alloc_request);
273

--- 43 unchanged lines hidden (view full) ---

317 return rq;
318
319out_queue_exit:
320 blk_queue_exit(q);
321 return ERR_PTR(ret);
322}
323EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
324
272
273 rq->__data_len = 0;
274 rq->__sector = (sector_t) -1;
275 rq->bio = rq->biotail = NULL;
276 return rq;
277}
278EXPORT_SYMBOL(blk_mq_alloc_request);
279

--- 43 unchanged lines hidden (view full) ---

323 return rq;
324
325out_queue_exit:
326 blk_queue_exit(q);
327 return ERR_PTR(ret);
328}
329EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
330
325void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
326 struct request *rq)
331void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
332 struct request *rq)
327{
333{
328 const int tag = rq->tag;
334 const int sched_tag = rq->internal_tag;
329 struct request_queue *q = rq->q;
330
331 if (rq->rq_flags & RQF_MQ_INFLIGHT)
332 atomic_dec(&hctx->nr_active);
333
334 wbt_done(q->rq_wb, &rq->issue_stat);
335 rq->rq_flags = 0;
336
337 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
338 clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
335 struct request_queue *q = rq->q;
336
337 if (rq->rq_flags & RQF_MQ_INFLIGHT)
338 atomic_dec(&hctx->nr_active);
339
340 wbt_done(q->rq_wb, &rq->issue_stat);
341 rq->rq_flags = 0;
342
343 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
344 clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
339 blk_mq_put_tag(hctx, hctx->tags, ctx, tag);
345 if (rq->tag != -1)
346 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
347 if (sched_tag != -1)
348 blk_mq_sched_completed_request(hctx, rq);
340 blk_queue_exit(q);
341}
342
349 blk_queue_exit(q);
350}
351
343static void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx,
352static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
344 struct request *rq)
345{
346 struct blk_mq_ctx *ctx = rq->mq_ctx;
347
348 ctx->rq_completed[rq_is_sync(rq)]++;
353 struct request *rq)
354{
355 struct blk_mq_ctx *ctx = rq->mq_ctx;
356
357 ctx->rq_completed[rq_is_sync(rq)]++;
349 __blk_mq_free_request(hctx, ctx, rq);
358 __blk_mq_finish_request(hctx, ctx, rq);
350}
351
359}
360
361void blk_mq_finish_request(struct request *rq)
362{
363 blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
364}
365
352void blk_mq_free_request(struct request *rq)
353{
366void blk_mq_free_request(struct request *rq)
367{
354 blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
368 blk_mq_sched_put_request(rq);
355}
356EXPORT_SYMBOL_GPL(blk_mq_free_request);
357
358inline void __blk_mq_end_request(struct request *rq, int error)
359{
360 blk_account_io_done(rq);
361
362 if (rq->end_io) {

--- 101 unchanged lines hidden (view full) ---

464 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
465}
466EXPORT_SYMBOL_GPL(blk_mq_request_started);
467
468void blk_mq_start_request(struct request *rq)
469{
470 struct request_queue *q = rq->q;
471
369}
370EXPORT_SYMBOL_GPL(blk_mq_free_request);
371
372inline void __blk_mq_end_request(struct request *rq, int error)
373{
374 blk_account_io_done(rq);
375
376 if (rq->end_io) {

--- 101 unchanged lines hidden (view full) ---

478 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
479}
480EXPORT_SYMBOL_GPL(blk_mq_request_started);
481
482void blk_mq_start_request(struct request *rq)
483{
484 struct request_queue *q = rq->q;
485
486 blk_mq_sched_started_request(rq);
487
472 trace_block_rq_issue(q, rq);
473
474 rq->resid_len = blk_rq_bytes(rq);
475 if (unlikely(blk_bidi_rq(rq)))
476 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
477
478 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
479 blk_stat_set_issue_time(&rq->issue_stat);

--- 32 unchanged lines hidden (view full) ---

512EXPORT_SYMBOL(blk_mq_start_request);
513
514static void __blk_mq_requeue_request(struct request *rq)
515{
516 struct request_queue *q = rq->q;
517
518 trace_block_rq_requeue(q, rq);
519 wbt_requeue(q->rq_wb, &rq->issue_stat);
488 trace_block_rq_issue(q, rq);
489
490 rq->resid_len = blk_rq_bytes(rq);
491 if (unlikely(blk_bidi_rq(rq)))
492 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
493
494 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
495 blk_stat_set_issue_time(&rq->issue_stat);

--- 32 unchanged lines hidden (view full) ---

528EXPORT_SYMBOL(blk_mq_start_request);
529
530static void __blk_mq_requeue_request(struct request *rq)
531{
532 struct request_queue *q = rq->q;
533
534 trace_block_rq_requeue(q, rq);
535 wbt_requeue(q->rq_wb, &rq->issue_stat);
536 blk_mq_sched_requeue_request(rq);
520
521 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
522 if (q->dma_drain_size && blk_rq_bytes(rq))
523 rq->nr_phys_segments--;
524 }
525}
526
527void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)

--- 18 unchanged lines hidden (view full) ---

546 spin_unlock_irqrestore(&q->requeue_lock, flags);
547
548 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
549 if (!(rq->rq_flags & RQF_SOFTBARRIER))
550 continue;
551
552 rq->rq_flags &= ~RQF_SOFTBARRIER;
553 list_del_init(&rq->queuelist);
537
538 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
539 if (q->dma_drain_size && blk_rq_bytes(rq))
540 rq->nr_phys_segments--;
541 }
542}
543
544void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)

--- 18 unchanged lines hidden (view full) ---

563 spin_unlock_irqrestore(&q->requeue_lock, flags);
564
565 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
566 if (!(rq->rq_flags & RQF_SOFTBARRIER))
567 continue;
568
569 rq->rq_flags &= ~RQF_SOFTBARRIER;
570 list_del_init(&rq->queuelist);
554 blk_mq_insert_request(rq, true, false, false);
571 blk_mq_sched_insert_request(rq, true, false, false);
555 }
556
557 while (!list_empty(&rq_list)) {
558 rq = list_entry(rq_list.next, struct request, queuelist);
559 list_del_init(&rq->queuelist);
572 }
573
574 while (!list_empty(&rq_list)) {
575 rq = list_entry(rq_list.next, struct request, queuelist);
576 list_del_init(&rq->queuelist);
560 blk_mq_insert_request(rq, false, false, false);
577 blk_mq_sched_insert_request(rq, false, false, false);
561 }
562
563 blk_mq_run_hw_queues(q, false);
564}
565
566void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
567 bool kick_requeue_list)
568{

--- 191 unchanged lines hidden (view full) ---

760
761 if (!checked--)
762 break;
763
764 if (!blk_rq_merge_ok(rq, bio))
765 continue;
766
767 el_ret = blk_try_merge(rq, bio);
578 }
579
580 blk_mq_run_hw_queues(q, false);
581}
582
583void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
584 bool kick_requeue_list)
585{

--- 191 unchanged lines hidden (view full) ---

777
778 if (!checked--)
779 break;
780
781 if (!blk_rq_merge_ok(rq, bio))
782 continue;
783
784 el_ret = blk_try_merge(rq, bio);
785 if (el_ret == ELEVATOR_NO_MERGE)
786 continue;
787
788 if (!blk_mq_sched_allow_merge(q, rq, bio))
789 break;
790
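Two changes land in the software-queue merge loop here: an ELEVATOR_NO_MERGE result now moves on to the next candidate instead of falling through, and the scheduler gets a chance to veto the merge via blk_mq_sched_allow_merge() before bio_attempt_back_merge()/bio_attempt_front_merge() run. The helper presumably just forwards to the elevator's hook when one is attached; a rough sketch, with the hook field name being an assumption (the real definition lives in blk-mq-sched.h):

	static inline bool blk_mq_sched_allow_merge(struct request_queue *q,
						    struct request *rq, struct bio *bio)
	{
		struct elevator_queue *e = q->elevator;

		if (e && e->type->ops.mq.allow_merge)	/* assumed hook location */
			return e->type->ops.mq.allow_merge(q, rq, bio);

		return true;
	}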
768 if (el_ret == ELEVATOR_BACK_MERGE) {
769 if (bio_attempt_back_merge(q, rq, bio)) {
770 ctx->rq_merged++;
771 return true;
772 }
773 break;
774 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
775 if (bio_attempt_front_merge(q, rq, bio)) {

--- 43 unchanged lines hidden (view full) ---

819static inline unsigned int queued_to_index(unsigned int queued)
820{
821 if (!queued)
822 return 0;
823
824 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
825}
826
791 if (el_ret == ELEVATOR_BACK_MERGE) {
792 if (bio_attempt_back_merge(q, rq, bio)) {
793 ctx->rq_merged++;
794 return true;
795 }
796 break;
797 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
798 if (bio_attempt_front_merge(q, rq, bio)) {

--- 43 unchanged lines hidden (view full) ---

842static inline unsigned int queued_to_index(unsigned int queued)
843{
844 if (!queued)
845 return 0;
846
847 return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
848}
849
850static bool blk_mq_get_driver_tag(struct request *rq,
851 struct blk_mq_hw_ctx **hctx, bool wait)
852{
853 struct blk_mq_alloc_data data = {
854 .q = rq->q,
855 .ctx = rq->mq_ctx,
856 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
857 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
858 };
859
860 if (blk_mq_hctx_stopped(data.hctx))
861 return false;
862
863 if (rq->tag != -1) {
864done:
865 if (hctx)
866 *hctx = data.hctx;
867 return true;
868 }
869
870 rq->tag = blk_mq_get_tag(&data);
871 if (rq->tag >= 0) {
872 data.hctx->tags->rqs[rq->tag] = rq;
873 goto done;
874 }
875
876 return false;
877}
878
879/*
880 * If we fail getting a driver tag because all the driver tags are already
881 * assigned and on the dispatch list, BUT the first entry does not have a
882 * tag, then we could deadlock. For that case, move entries with assigned
883 * driver tags to the front, leaving the set of tagged requests in the
884 * same order, and the untagged set in the same order.
885 */
886static bool reorder_tags_to_front(struct list_head *list)
887{
888 struct request *rq, *tmp, *first = NULL;
889
890 list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
891 if (rq == first)
892 break;
893 if (rq->tag != -1) {
894 list_move(&rq->queuelist, list);
895 if (!first)
896 first = rq;
897 }
898 }
899
900 return first != NULL;
901}
902
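blk_mq_get_driver_tag() and reorder_tags_to_front() are new and carry the core of the deferred-tagging scheme: a request may reach the dispatch list holding only its scheduler tag, so the driver tag is taken here on demand (and the tags->rqs[] entry filled in so completion can find the request). The comment above reorder_tags_to_front() describes the deadlock it avoids: if the head of the dispatch list has no driver tag while all driver tags are held by requests further down the same list, waiting would never make progress, so the already-tagged requests are moved ahead and dispatched first to give their tags back. The reverse walk plus move-to-front is a stable partition: tagged requests end up at the front in their original relative order, untagged ones behind them, also in order. A standalone user-space illustration of that invariant (toy list code, not kernel code):

	#include <stdio.h>

	struct node {
		int tag;			/* -1 == no driver tag yet */
		struct node *prev, *next;
	};

	static void list_init(struct node *head)
	{
		head->prev = head->next = head;
	}

	static void list_add_tail(struct node *head, struct node *n)
	{
		n->prev = head->prev;
		n->next = head;
		head->prev->next = n;
		head->prev = n;
	}

	static void list_move_to_front(struct node *head, struct node *n)
	{
		n->prev->next = n->next;		/* unlink */
		n->next->prev = n->prev;
		n->next = head->next;			/* relink right after head */
		n->prev = head;
		head->next->prev = n;
		head->next = n;
	}

	int main(void)
	{
		int tags[] = { -1, 7, -1, 3, 9 };
		struct node head, nodes[5], *n, *prev, *first = NULL;
		int i;

		list_init(&head);
		for (i = 0; i < 5; i++) {
			nodes[i].tag = tags[i];
			list_add_tail(&head, &nodes[i]);
		}

		/* reverse walk, move tagged entries to the front, stop at the first one moved */
		for (n = head.prev; n != &head && n != first; n = prev) {
			prev = n->prev;
			if (n->tag != -1) {
				list_move_to_front(&head, n);
				if (!first)
					first = n;
			}
		}

		for (n = head.next; n != &head; n = n->next)
			printf("%d ", n->tag);		/* prints: 7 3 9 -1 -1 */
		printf("\n");
		return 0;
	}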
827bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
828{
829 struct request_queue *q = hctx->queue;
830 struct request *rq;
831 LIST_HEAD(driver_list);
832 struct list_head *dptr;
833 int queued, ret = BLK_MQ_RQ_QUEUE_OK;
834

--- 6 unchanged lines hidden (view full) ---

841 /*
842 * Now process all the entries, sending them to the driver.
843 */
844 queued = 0;
845 while (!list_empty(list)) {
846 struct blk_mq_queue_data bd;
847
848 rq = list_first_entry(list, struct request, queuelist);
903bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
904{
905 struct request_queue *q = hctx->queue;
906 struct request *rq;
907 LIST_HEAD(driver_list);
908 struct list_head *dptr;
909 int queued, ret = BLK_MQ_RQ_QUEUE_OK;
910

--- 6 unchanged lines hidden (view full) ---

917 /*
918 * Now process all the entries, sending them to the driver.
919 */
920 queued = 0;
921 while (!list_empty(list)) {
922 struct blk_mq_queue_data bd;
923
924 rq = list_first_entry(list, struct request, queuelist);
925 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
926 if (!queued && reorder_tags_to_front(list))
927 continue;
928 blk_mq_sched_mark_restart(hctx);
929 break;
930 }
849 list_del_init(&rq->queuelist);
850
851 bd.rq = rq;
852 bd.list = dptr;
853 bd.last = list_empty(list);
854
855 ret = q->mq_ops->queue_rq(hctx, &bd);
856 switch (ret) {

--- 37 unchanged lines hidden (view full) ---

894 /*
895 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
896 * it's possible the queue is stopped and restarted again
897 * before this. Queue restart will dispatch requests. And since
898 * requests in rq_list aren't added into hctx->dispatch yet,
899 * the requests in rq_list might get lost.
900 *
901 * blk_mq_run_hw_queue() already checks the STOPPED bit
931 list_del_init(&rq->queuelist);
932
933 bd.rq = rq;
934 bd.list = dptr;
935 bd.last = list_empty(list);
936
937 ret = q->mq_ops->queue_rq(hctx, &bd);
938 switch (ret) {

--- 37 unchanged lines hidden (view full) ---

976 /*
977 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
978 * it's possible the queue is stopped and restarted again
979 * before this. Queue restart will dispatch requests. And since
980 * requests in rq_list aren't added into hctx->dispatch yet,
981 * the requests in rq_list might get lost.
982 *
983 * blk_mq_run_hw_queue() already checks the STOPPED bit
902 **/
903 blk_mq_run_hw_queue(hctx, true);
984 *
985 * If RESTART is set, then let completion restart the queue
986 * instead of potentially looping here.
987 */
988 if (!blk_mq_sched_needs_restart(hctx))
989 blk_mq_run_hw_queue(hctx, true);
904 }
905
906 return ret != BLK_MQ_RQ_QUEUE_BUSY;
907}
908
990 }
991
992 return ret != BLK_MQ_RQ_QUEUE_BUSY;
993}
994
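The dispatch loop changes shape accordingly: a request must own a driver tag before it is handed to ->queue_rq(), and when no tag is available the code either reorders already-tagged requests to the front (see reorder_tags_to_front() above) or marks the hardware queue for a restart and bails out. The restart mark is what lets the BUSY branch above avoid re-running the queue unconditionally: dispatch sets a flag when it had to stop early, and the completion side tests, clears it and re-runs the queue. A sketch of the two inline helpers named in this hunk, assuming the state bit added elsewhere in this series is called BLK_MQ_S_SCHED_RESTART (the real helpers live in blk-mq-sched.h):

	static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
	{
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
	}

	static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
	{
		return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
	}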
909/*
910 * Run this hardware queue, pulling any software queues mapped to it in.
911 * Note that this function currently has various problems around ordering
912 * of IO. In particular, we'd like FIFO behaviour on handling existing
913 * items on the hctx->dispatch list. Ignore that for now.
914 */
915static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
916{
917 LIST_HEAD(rq_list);
918 LIST_HEAD(driver_list);
919
920 if (unlikely(blk_mq_hctx_stopped(hctx)))
921 return;
922
923 hctx->run++;
924
925 /*
926 * Touch any software queue that has pending entries.
927 */
928 blk_mq_flush_busy_ctxs(hctx, &rq_list);
929
930 /*
931 * If we have previous entries on our dispatch list, grab them
932 * and stuff them at the front for more fair dispatch.
933 */
934 if (!list_empty_careful(&hctx->dispatch)) {
935 spin_lock(&hctx->lock);
936 if (!list_empty(&hctx->dispatch))
937 list_splice_init(&hctx->dispatch, &rq_list);
938 spin_unlock(&hctx->lock);
939 }
940
941 blk_mq_dispatch_rq_list(hctx, &rq_list);
942}
943
944static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
945{
946 int srcu_idx;
947
948 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
949 cpu_online(hctx->next_cpu));
950
951 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
952 rcu_read_lock();
995static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
996{
997 int srcu_idx;
998
999 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1000 cpu_online(hctx->next_cpu));
1001
1002 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1003 rcu_read_lock();
953 blk_mq_process_rq_list(hctx);
1004 blk_mq_sched_dispatch_requests(hctx);
954 rcu_read_unlock();
955 } else {
956 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
1005 rcu_read_unlock();
1006 } else {
1007 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
957 blk_mq_process_rq_list(hctx);
1008 blk_mq_sched_dispatch_requests(hctx);
958 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
959 }
960}
961
962/*
963 * It'd be great if the workqueue API had a way to pass
964 * in a mask and had some smarts for more clever placement.
965 * For now we just round-robin here, switching for every

--- 39 unchanged lines hidden (view full) ---

1005}
1006
1007void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1008{
1009 struct blk_mq_hw_ctx *hctx;
1010 int i;
1011
1012 queue_for_each_hw_ctx(q, hctx, i) {
1009 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1010 }
1011}
1012
1013/*
1014 * It'd be great if the workqueue API had a way to pass
1015 * in a mask and had some smarts for more clever placement.
1016 * For now we just round-robin here, switching for every

--- 39 unchanged lines hidden (view full) ---

1056}
1057
1058void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1059{
1060 struct blk_mq_hw_ctx *hctx;
1061 int i;
1062
1063 queue_for_each_hw_ctx(q, hctx, i) {
1013 if ((!blk_mq_hctx_has_pending(hctx) &&
1014 list_empty_careful(&hctx->dispatch)) ||
1064 if (!blk_mq_hctx_has_pending(hctx) ||
1015 blk_mq_hctx_stopped(hctx))
1016 continue;
1017
1018 blk_mq_run_hw_queue(hctx, async);
1019 }
1020}
1021EXPORT_SYMBOL(blk_mq_run_hw_queues);
1022

--- 120 unchanged lines hidden (view full) ---

1143 bool at_head)
1144{
1145 struct blk_mq_ctx *ctx = rq->mq_ctx;
1146
1147 __blk_mq_insert_req_list(hctx, rq, at_head);
1148 blk_mq_hctx_mark_pending(hctx, ctx);
1149}
1150
1065 blk_mq_hctx_stopped(hctx))
1066 continue;
1067
1068 blk_mq_run_hw_queue(hctx, async);
1069 }
1070}
1071EXPORT_SYMBOL(blk_mq_run_hw_queues);
1072

--- 120 unchanged lines hidden (view full) ---

1193 bool at_head)
1194{
1195 struct blk_mq_ctx *ctx = rq->mq_ctx;
1196
1197 __blk_mq_insert_req_list(hctx, rq, at_head);
1198 blk_mq_hctx_mark_pending(hctx, ctx);
1199}
1200
1151void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1152 bool async)
1153{
1154 struct blk_mq_ctx *ctx = rq->mq_ctx;
1155 struct request_queue *q = rq->q;
1156 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1201void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1202 struct list_head *list)
1157
1203
1158 spin_lock(&ctx->lock);
1159 __blk_mq_insert_request(hctx, rq, at_head);
1160 spin_unlock(&ctx->lock);
1161
1162 if (run_queue)
1163 blk_mq_run_hw_queue(hctx, async);
1164}
1165
1166static void blk_mq_insert_requests(struct request_queue *q,
1167 struct blk_mq_ctx *ctx,
1168 struct list_head *list,
1169 int depth,
1170 bool from_schedule)
1171
1172{
1204{
1173 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1174
1175 trace_block_unplug(q, depth, !from_schedule);
1176
1177 /*
1178 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1179 * offline now
1180 */
1181 spin_lock(&ctx->lock);
1182 while (!list_empty(list)) {
1183 struct request *rq;
1184
1185 rq = list_first_entry(list, struct request, queuelist);
1186 BUG_ON(rq->mq_ctx != ctx);
1187 list_del_init(&rq->queuelist);
1188 __blk_mq_insert_req_list(hctx, rq, false);
1189 }
1190 blk_mq_hctx_mark_pending(hctx, ctx);
1191 spin_unlock(&ctx->lock);
1205 /*
1206 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1207 * offline now
1208 */
1209 spin_lock(&ctx->lock);
1210 while (!list_empty(list)) {
1211 struct request *rq;
1212
1213 rq = list_first_entry(list, struct request, queuelist);
1214 BUG_ON(rq->mq_ctx != ctx);
1215 list_del_init(&rq->queuelist);
1216 __blk_mq_insert_req_list(hctx, rq, false);
1217 }
1218 blk_mq_hctx_mark_pending(hctx, ctx);
1219 spin_unlock(&ctx->lock);
1192
1193 blk_mq_run_hw_queue(hctx, from_schedule);
1194}
1195
1196static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1197{
1198 struct request *rqa = container_of(a, struct request, queuelist);
1199 struct request *rqb = container_of(b, struct request, queuelist);
1200
1201 return !(rqa->mq_ctx < rqb->mq_ctx ||

--- 19 unchanged lines hidden (view full) ---

1221 depth = 0;
1222
1223 while (!list_empty(&list)) {
1224 rq = list_entry_rq(list.next);
1225 list_del_init(&rq->queuelist);
1226 BUG_ON(!rq->q);
1227 if (rq->mq_ctx != this_ctx) {
1228 if (this_ctx) {
1220}
1221
1222static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1223{
1224 struct request *rqa = container_of(a, struct request, queuelist);
1225 struct request *rqb = container_of(b, struct request, queuelist);
1226
1227 return !(rqa->mq_ctx < rqb->mq_ctx ||

--- 19 unchanged lines hidden (view full) ---

1247 depth = 0;
1248
1249 while (!list_empty(&list)) {
1250 rq = list_entry_rq(list.next);
1251 list_del_init(&rq->queuelist);
1252 BUG_ON(!rq->q);
1253 if (rq->mq_ctx != this_ctx) {
1254 if (this_ctx) {
1229 blk_mq_insert_requests(this_q, this_ctx,
1230 &ctx_list, depth,
1231 from_schedule);
1255 trace_block_unplug(this_q, depth, from_schedule);
1256 blk_mq_sched_insert_requests(this_q, this_ctx,
1257 &ctx_list,
1258 from_schedule);
1232 }
1233
1234 this_ctx = rq->mq_ctx;
1235 this_q = rq->q;
1236 depth = 0;
1237 }
1238
1239 depth++;
1240 list_add_tail(&rq->queuelist, &ctx_list);
1241 }
1242
1243 /*
1244 * If 'this_ctx' is set, we know we have entries to complete
1245 * on 'ctx_list'. Do those.
1246 */
1247 if (this_ctx) {
1259 }
1260
1261 this_ctx = rq->mq_ctx;
1262 this_q = rq->q;
1263 depth = 0;
1264 }
1265
1266 depth++;
1267 list_add_tail(&rq->queuelist, &ctx_list);
1268 }
1269
1270 /*
1271 * If 'this_ctx' is set, we know we have entries to complete
1272 * on 'ctx_list'. Do those.
1273 */
1274 if (this_ctx) {
1248 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1249 from_schedule);
1275 trace_block_unplug(this_q, depth, from_schedule);
1276 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1277 from_schedule);
1250 }
1251}
1252
1253static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1254{
1255 init_request_from_bio(rq, bio);
1256
1257 blk_account_io_start(rq, true);

--- 21 unchanged lines hidden (view full) ---

1279
1280 spin_lock(&ctx->lock);
1281 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1282 blk_mq_bio_to_request(rq, bio);
1283 goto insert_rq;
1284 }
1285
1286 spin_unlock(&ctx->lock);
1278 }
1279}
1280
1281static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1282{
1283 init_request_from_bio(rq, bio);
1284
1285 blk_account_io_start(rq, true);

--- 21 unchanged lines hidden (view full) ---

1307
1308 spin_lock(&ctx->lock);
1309 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1310 blk_mq_bio_to_request(rq, bio);
1311 goto insert_rq;
1312 }
1313
1314 spin_unlock(&ctx->lock);
1287 __blk_mq_free_request(hctx, ctx, rq);
1315 __blk_mq_finish_request(hctx, ctx, rq);
1288 return true;
1289 }
1290}
1291
1316 return true;
1317 }
1318}
1319
1292static struct request *blk_mq_map_request(struct request_queue *q,
1293 struct bio *bio,
1294 struct blk_mq_alloc_data *data)
1320static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1295{
1321{
1296 struct blk_mq_hw_ctx *hctx;
1297 struct blk_mq_ctx *ctx;
1298 struct request *rq;
1322 if (rq->tag != -1)
1323 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1299
1324
1300 blk_queue_enter_live(q);
1301 ctx = blk_mq_get_ctx(q);
1302 hctx = blk_mq_map_queue(q, ctx->cpu);
1303
1304 trace_block_getrq(q, bio, bio->bi_opf);
1305 blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
1306 rq = __blk_mq_alloc_request(data, bio->bi_opf);
1307
1308 data->hctx->queued++;
1309 return rq;
1325 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1310}
1311
1326}
1327
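The polling cookie changes meaning along with the tagging scheme. request_to_qc_t() used to encode the driver tag unconditionally; it now encodes whichever tag the request currently holds and flags scheduler tags as internal, and the blk_mq_poll() hunk near the end of this diff decodes that flag to decide whether to resolve the cookie against hctx->tags or hctx->sched_tags. The actual encoding is done by blk_tag_to_qc_t() and the blk_qc_t helpers in the block headers; the sketch below only illustrates the idea, and the bit positions are an assumption, so check the header for the authoritative layout:

	/* sketch: tag in the low bits, queue number above it, top bit marks an internal tag */
	#define SKETCH_QC_T_SHIFT	16
	#define SKETCH_QC_T_INTERNAL	(1U << 31)

	static inline blk_qc_t sketch_tag_to_qc_t(unsigned int tag,
						  unsigned int queue_num, bool internal)
	{
		blk_qc_t cookie = tag | (queue_num << SKETCH_QC_T_SHIFT);

		return internal ? cookie | SKETCH_QC_T_INTERNAL : cookie;
	}

	static inline bool sketch_qc_t_is_internal(blk_qc_t cookie)
	{
		return (cookie & SKETCH_QC_T_INTERNAL) != 0;
	}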
1312static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1313{
1314 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1315}
1316
1317static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1318{
1328static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1329{
1319 int ret;
1320 struct request_queue *q = rq->q;
1330 struct request_queue *q = rq->q;
1321 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
1322 struct blk_mq_queue_data bd = {
1323 .rq = rq,
1324 .list = NULL,
1325 .last = 1
1326 };
1331 struct blk_mq_queue_data bd = {
1332 .rq = rq,
1333 .list = NULL,
1334 .last = 1
1335 };
1327 blk_qc_t new_cookie = request_to_qc_t(hctx, rq);
1336 struct blk_mq_hw_ctx *hctx;
1337 blk_qc_t new_cookie;
1338 int ret;
1328
1339
1329 if (blk_mq_hctx_stopped(hctx))
1340 if (q->elevator)
1330 goto insert;
1331
1341 goto insert;
1342
1343 if (!blk_mq_get_driver_tag(rq, &hctx, false))
1344 goto insert;
1345
1346 new_cookie = request_to_qc_t(hctx, rq);
1347
1332 /*
1333 * For OK queue, we are done. For error, kill it. Any other
1334 * error (busy), just add it to our list as we previously
1335 * would have done
1336 */
1337 ret = q->mq_ops->queue_rq(hctx, &bd);
1338 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1339 *cookie = new_cookie;

--- 5 unchanged lines hidden (view full) ---

1345 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1346 *cookie = BLK_QC_T_NONE;
1347 rq->errors = -EIO;
1348 blk_mq_end_request(rq, rq->errors);
1349 return;
1350 }
1351
1352insert:
1348 /*
1349 * For OK queue, we are done. For error, kill it. Any other
1350 * error (busy), just add it to our list as we previously
1351 * would have done
1352 */
1353 ret = q->mq_ops->queue_rq(hctx, &bd);
1354 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1355 *cookie = new_cookie;

--- 5 unchanged lines hidden (view full) ---

1361 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1362 *cookie = BLK_QC_T_NONE;
1363 rq->errors = -EIO;
1364 blk_mq_end_request(rq, rq->errors);
1365 return;
1366 }
1367
1368insert:
1353 blk_mq_insert_request(rq, false, true, true);
1369 blk_mq_sched_insert_request(rq, false, true, true);
1354}
1355
1356/*
1357 * Multiple hardware queue variant. This will not use per-process plugs,
1358 * but will attempt to bypass the hctx queueing if we can go straight to
1359 * hardware for SYNC IO.
1360 */
1361static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

--- 16 unchanged lines hidden (view full) ---

1378 }
1379
1380 blk_queue_split(q, &bio, q->bio_split);
1381
1382 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1383 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1384 return BLK_QC_T_NONE;
1385
1370}
1371
1372/*
1373 * Multiple hardware queue variant. This will not use per-process plugs,
1374 * but will attempt to bypass the hctx queueing if we can go straight to
1375 * hardware for SYNC IO.
1376 */
1377static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

--- 16 unchanged lines hidden (view full) ---

1394 }
1395
1396 blk_queue_split(q, &bio, q->bio_split);
1397
1398 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1399 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1400 return BLK_QC_T_NONE;
1401
1402 if (blk_mq_sched_bio_merge(q, bio))
1403 return BLK_QC_T_NONE;
1404
1386 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1387
1405 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1406
1388 rq = blk_mq_map_request(q, bio, &data);
1407 trace_block_getrq(q, bio, bio->bi_opf);
1408
1409 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1389 if (unlikely(!rq)) {
1390 __wbt_done(q->rq_wb, wb_acct);
1391 return BLK_QC_T_NONE;
1392 }
1393
1394 wbt_track(&rq->issue_stat, wb_acct);
1395
1396 cookie = request_to_qc_t(data.hctx, rq);
1397
1398 if (unlikely(is_flush_fua)) {
1399 blk_mq_bio_to_request(rq, bio);
1410 if (unlikely(!rq)) {
1411 __wbt_done(q->rq_wb, wb_acct);
1412 return BLK_QC_T_NONE;
1413 }
1414
1415 wbt_track(&rq->issue_stat, wb_acct);
1416
1417 cookie = request_to_qc_t(data.hctx, rq);
1418
1419 if (unlikely(is_flush_fua)) {
1420 blk_mq_bio_to_request(rq, bio);
1421 blk_mq_get_driver_tag(rq, NULL, true);
1400 blk_insert_flush(rq);
1401 goto run_queue;
1402 }
1403
1404 plug = current->plug;
1405 /*
1406 * If the driver supports defer issued based on 'last', then
1407 * queue it up like normal since we can potentially save some

--- 34 unchanged lines hidden (view full) ---

1442 } else {
1443 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1444 blk_mq_try_issue_directly(old_rq, &cookie);
1445 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1446 }
1447 goto done;
1448 }
1449
1422 blk_insert_flush(rq);
1423 goto run_queue;
1424 }
1425
1426 plug = current->plug;
1427 /*
1428 * If the driver supports defer issued based on 'last', then
1429 * queue it up like normal since we can potentially save some

--- 34 unchanged lines hidden (view full) ---

1464 } else {
1465 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1466 blk_mq_try_issue_directly(old_rq, &cookie);
1467 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1468 }
1469 goto done;
1470 }
1471
1472 if (q->elevator) {
1473 blk_mq_put_ctx(data.ctx);
1474 blk_mq_bio_to_request(rq, bio);
1475 blk_mq_sched_insert_request(rq, false, true, true);
1476 goto done;
1477 }
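Both make_request variants gain this same early-out: with an elevator attached, the bio is neither plugged nor issued directly from here; it is turned into a request and handed to blk_mq_sched_insert_request(), so ordering and merging belong to the scheduler and only its dispatch path feeds the driver. The single-queue variant later in this diff adds an identical block. A minimal sketch of the fork, using a hypothetical wrapper name purely for illustration:

	static void sketch_submit_tail(struct request_queue *q, struct request *rq,
				       struct bio *bio)
	{
		if (q->elevator) {
			/* scheduler path: queue the request and let dispatch pull it later */
			blk_mq_bio_to_request(rq, bio);
			blk_mq_sched_insert_request(rq, false, true, true);
			return;
		}
		/* no scheduler: fall through to the existing sw-queue / run-queue logic */
	}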
1450 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1451 /*
1452 * For a SYNC request, send it to the hardware immediately. For
1453 * an ASYNC request, just ensure that we run it later on. The
1454 * latter allows for merging opportunities and more efficient
1455 * dispatching.
1456 */
1457run_queue:

--- 29 unchanged lines hidden (view full) ---

1487 blk_queue_split(q, &bio, q->bio_split);
1488
1489 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1490 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1491 return BLK_QC_T_NONE;
1492 } else
1493 request_count = blk_plug_queued_count(q);
1494
1478 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1479 /*
1480 * For a SYNC request, send it to the hardware immediately. For
1481 * an ASYNC request, just ensure that we run it later on. The
1482 * latter allows for merging opportunities and more efficient
1483 * dispatching.
1484 */
1485run_queue:

--- 29 unchanged lines hidden (view full) ---

1515 blk_queue_split(q, &bio, q->bio_split);
1516
1517 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1518 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1519 return BLK_QC_T_NONE;
1520 } else
1521 request_count = blk_plug_queued_count(q);
1522
1523 if (blk_mq_sched_bio_merge(q, bio))
1524 return BLK_QC_T_NONE;
1525
1495 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1496
1526 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1527
1497 rq = blk_mq_map_request(q, bio, &data);
1528 trace_block_getrq(q, bio, bio->bi_opf);
1529
1530 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
1498 if (unlikely(!rq)) {
1499 __wbt_done(q->rq_wb, wb_acct);
1500 return BLK_QC_T_NONE;
1501 }
1502
1503 wbt_track(&rq->issue_stat, wb_acct);
1504
1505 cookie = request_to_qc_t(data.hctx, rq);
1506
1507 if (unlikely(is_flush_fua)) {
1508 blk_mq_bio_to_request(rq, bio);
1531 if (unlikely(!rq)) {
1532 __wbt_done(q->rq_wb, wb_acct);
1533 return BLK_QC_T_NONE;
1534 }
1535
1536 wbt_track(&rq->issue_stat, wb_acct);
1537
1538 cookie = request_to_qc_t(data.hctx, rq);
1539
1540 if (unlikely(is_flush_fua)) {
1541 blk_mq_bio_to_request(rq, bio);
1542 blk_mq_get_driver_tag(rq, NULL, true);
1509 blk_insert_flush(rq);
1510 goto run_queue;
1511 }
1512
1513 /*
1514 * A task plug currently exists. Since this is completely lockless,
1515 * utilize that to temporarily store requests until the task is
1516 * either done or scheduled away.

--- 22 unchanged lines hidden (view full) ---

1539 blk_flush_plug_list(plug, false);
1540 trace_block_plug(q);
1541 }
1542
1543 list_add_tail(&rq->queuelist, &plug->mq_list);
1544 return cookie;
1545 }
1546
1543 blk_insert_flush(rq);
1544 goto run_queue;
1545 }
1546
1547 /*
1548 * A task plug currently exists. Since this is completely lockless,
1549 * utilize that to temporarily store requests until the task is
1550 * either done or scheduled away.

--- 22 unchanged lines hidden (view full) ---

1573 blk_flush_plug_list(plug, false);
1574 trace_block_plug(q);
1575 }
1576
1577 list_add_tail(&rq->queuelist, &plug->mq_list);
1578 return cookie;
1579 }
1580
1581 if (q->elevator) {
1582 blk_mq_put_ctx(data.ctx);
1583 blk_mq_bio_to_request(rq, bio);
1584 blk_mq_sched_insert_request(rq, false, true, true);
1585 goto done;
1586 }
1547 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1548 /*
1549 * For a SYNC request, send it to the hardware immediately. For
1550 * an ASYNC request, just ensure that we run it later on. The
1551 * latter allows for merging opportunities and more efficient
1552 * dispatching.
1553 */
1554run_queue:
1555 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1556 }
1557
1558 blk_mq_put_ctx(data.ctx);
1587 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1588 /*
1589 * For a SYNC request, send it to the hardware immediately. For
1590 * an ASYNC request, just ensure that we run it later on. The
1591 * latter allows for merging opportunities and more efficient
1592 * dispatching.
1593 */
1594run_queue:
1595 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1596 }
1597
1598 blk_mq_put_ctx(data.ctx);
1599done:
1559 return cookie;
1560}
1561
1562void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1563 unsigned int hctx_idx)
1564{
1565 struct page *page;
1566

--- 353 unchanged lines hidden (view full) ---

1920 blk_mq_free_rq_map(set->tags[hctx_idx]);
1921 set->tags[hctx_idx] = NULL;
1922 return false;
1923}
1924
1925static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
1926 unsigned int hctx_idx)
1927{
1600 return cookie;
1601}
1602
1603void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1604 unsigned int hctx_idx)
1605{
1606 struct page *page;
1607

--- 353 unchanged lines hidden (view full) ---

1961 blk_mq_free_rq_map(set->tags[hctx_idx]);
1962 set->tags[hctx_idx] = NULL;
1963 return false;
1964}
1965
1966static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
1967 unsigned int hctx_idx)
1968{
1928 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
1929 blk_mq_free_rq_map(set->tags[hctx_idx]);
1930 set->tags[hctx_idx] = NULL;
1969 if (set->tags[hctx_idx]) {
1970 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
1971 blk_mq_free_rq_map(set->tags[hctx_idx]);
1972 set->tags[hctx_idx] = NULL;
1973 }
1931}
1932
1933static void blk_mq_map_swqueue(struct request_queue *q,
1934 const struct cpumask *online_mask)
1935{
1936 unsigned int i, hctx_idx;
1937 struct blk_mq_hw_ctx *hctx;
1938 struct blk_mq_ctx *ctx;

--- 140 unchanged lines hidden (view full) ---

2079 * and headache because q->mq_kobj shouldn't have been introduced,
2080 * but we can't group ctx/kctx kobj without it.
2081 */
2082void blk_mq_release(struct request_queue *q)
2083{
2084 struct blk_mq_hw_ctx *hctx;
2085 unsigned int i;
2086
1974}
1975
1976static void blk_mq_map_swqueue(struct request_queue *q,
1977 const struct cpumask *online_mask)
1978{
1979 unsigned int i, hctx_idx;
1980 struct blk_mq_hw_ctx *hctx;
1981 struct blk_mq_ctx *ctx;

--- 140 unchanged lines hidden (view full) ---

2122 * and headache because q->mq_kobj shouldn't have been introduced,
2123 * but we can't group ctx/kctx kobj without it.
2124 */
2125void blk_mq_release(struct request_queue *q)
2126{
2127 struct blk_mq_hw_ctx *hctx;
2128 unsigned int i;
2129
2130 blk_mq_sched_teardown(q);
2131
2087 /* hctx kobj stays in hctx */
2088 queue_for_each_hw_ctx(q, hctx, i) {
2089 if (!hctx)
2090 continue;
2091 kfree(hctx->ctxs);
2092 kfree(hctx);
2093 }
2094

--- 404 unchanged lines hidden (view full) ---

2499EXPORT_SYMBOL(blk_mq_free_tag_set);
2500
2501int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2502{
2503 struct blk_mq_tag_set *set = q->tag_set;
2504 struct blk_mq_hw_ctx *hctx;
2505 int i, ret;
2506
2132 /* hctx kobj stays in hctx */
2133 queue_for_each_hw_ctx(q, hctx, i) {
2134 if (!hctx)
2135 continue;
2136 kfree(hctx->ctxs);
2137 kfree(hctx);
2138 }
2139

--- 404 unchanged lines hidden (view full) ---

2544EXPORT_SYMBOL(blk_mq_free_tag_set);
2545
2546int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2547{
2548 struct blk_mq_tag_set *set = q->tag_set;
2549 struct blk_mq_hw_ctx *hctx;
2550 int i, ret;
2551
2507 if (!set || nr > set->queue_depth)
2552 if (!set)
2508 return -EINVAL;
2509
2510 ret = 0;
2511 queue_for_each_hw_ctx(q, hctx, i) {
2512 if (!hctx->tags)
2513 continue;
2553 return -EINVAL;
2554
2555 ret = 0;
2556 queue_for_each_hw_ctx(q, hctx, i) {
2557 if (!hctx->tags)
2558 continue;
2514 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2559 /*
2560 * If we're using an MQ scheduler, just update the scheduler
2561 * queue depth. This is similar to what the old code would do.
2562 */
2563 if (!hctx->sched_tags)
2564 ret = blk_mq_tag_update_depth(hctx->tags,
2565 min(nr, set->queue_depth));
2566 else
2567 ret = blk_mq_tag_update_depth(hctx->sched_tags, nr);
2515 if (ret)
2516 break;
2517 }
2518
2519 if (!ret)
2520 q->nr_requests = nr;
2521
2522 return ret;

--- 176 unchanged lines hidden (view full) ---

2699 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2700 return false;
2701
2702 plug = current->plug;
2703 if (plug)
2704 blk_flush_plug_list(plug, false);
2705
2706 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2568 if (ret)
2569 break;
2570 }
2571
2572 if (!ret)
2573 q->nr_requests = nr;
2574
2575 return ret;

--- 176 unchanged lines hidden (view full) ---

2752 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2753 return false;
2754
2755 plug = current->plug;
2756 if (plug)
2757 blk_flush_plug_list(plug, false);
2758
2759 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2707 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2760 if (!blk_qc_t_is_internal(cookie))
2761 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2762 else
2763 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2708
2709 return __blk_mq_poll(hctx, rq);
2710}
2711EXPORT_SYMBOL_GPL(blk_mq_poll);
2712
2713void blk_mq_disable_hotplug(void)
2714{
2715 mutex_lock(&all_q_mutex);

--- 18 unchanged lines hidden ---
2764
2765 return __blk_mq_poll(hctx, rq);
2766}
2767EXPORT_SYMBOL_GPL(blk_mq_poll);
2768
2769void blk_mq_disable_hotplug(void)
2770{
2771 mutex_lock(&all_q_mutex);

--- 18 unchanged lines hidden ---