blk-mq.c: 3506c37dccd1686f2e6607f74c275acb63b47513 -> c3077b5d97a39223a2d4b95a21ccff660836170f
// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>

--- 27 unchanged lines hidden ---

#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
        int ddir, sectors, bucket;

        ddir = rq_data_dir(rq);

--- 517 unchanged lines hidden ---

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

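/*
 * Illustrative sketch, not part of this file: blk_mq_end_request() is what a
 * driver's ->complete() callback typically calls once a request is finished.
 * The handler and the mydrv_status() helper below are hypothetical:
 *
 *      static void mydrv_complete_rq(struct request *rq)
 *      {
 *              blk_mq_end_request(rq, mydrv_status(rq));
 *      }
 *
 * The softirq handler added below invokes exactly this kind of callback via
 * q->mq_ops->complete(rq).
 */
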
/*
 * Softirq action handler - move entries to local list and loop over them
 * while passing them to the queue registered handler.
 */
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
        struct list_head *cpu_list, local_list;

        local_irq_disable();
        cpu_list = this_cpu_ptr(&blk_cpu_done);
        list_replace_init(cpu_list, &local_list);
        local_irq_enable();

        while (!list_empty(&local_list)) {
                struct request *rq;

                rq = list_entry(local_list.next, struct request, ipi_list);
                list_del_init(&rq->ipi_list);
                rq->q->mq_ops->complete(rq);
        }
}

#ifdef CONFIG_SMP
static void trigger_softirq(void *data)
{
        struct request *rq = data;
        struct list_head *list;

        list = this_cpu_ptr(&blk_cpu_done);
        list_add_tail(&rq->ipi_list, list);

        if (list->next == &rq->ipi_list)
                raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

/*
 * Setup and invoke a run of 'trigger_softirq' on the given cpu.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
        if (cpu_online(cpu)) {
                call_single_data_t *data = &rq->csd;

                data->func = trigger_softirq;
                data->info = rq;
                data->flags = 0;

                smp_call_function_single_async(cpu, data);
                return 0;
        }

        return 1;
}
#else /* CONFIG_SMP */
static int raise_blk_irq(int cpu, struct request *rq)
{
        return 1;
}
#endif

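/*
 * A nonzero return from raise_blk_irq() means the remote CPU could not be
 * kicked (it is offline, or this is a !CONFIG_SMP build); the caller then
 * falls back to completing on the local CPU via the do_local path in
 * __blk_complete_request() below.
 */
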
static int blk_softirq_cpu_dead(unsigned int cpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq.
         */
        local_irq_disable();
        list_splice_init(&per_cpu(blk_cpu_done, cpu),
                         this_cpu_ptr(&blk_cpu_done));
        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        local_irq_enable();

        return 0;
}

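/*
 * blk_softirq_cpu_dead() is registered as the CPUHP_BLOCK_SOFTIRQ_DEAD
 * hotplug callback in blk_mq_init() further down, so completions queued on
 * a CPU that goes offline are not lost.
 */
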
static void __blk_complete_request(struct request *req)
{
        struct request_queue *q = req->q;
        int cpu, ccpu = req->mq_ctx->cpu;
        unsigned long flags;
        bool shared = false;

        BUG_ON(!q->mq_ops->complete);

        local_irq_save(flags);
        cpu = smp_processor_id();

        /*
         * Select completion CPU
         */
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
                if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
                        shared = cpus_share_cache(cpu, ccpu);
        } else
                ccpu = cpu;

        /*
         * If the current CPU and the requested completion CPU share a cache,
         * run the softirq on the current CPU. One might think this defeats
         * QUEUE_FLAG_SAME_FORCE, but it does not: blk_complete_request() runs
         * from the interrupt handler, and since the I/O controller does not
         * support multiple interrupts, the current CPU is effectively unique.
         * Completing locally avoids sending an IPI from the current CPU to
         * the first CPU of the cache-sharing group.
         */
        if (ccpu == cpu || shared) {
                struct list_head *list;
do_local:
                list = this_cpu_ptr(&blk_cpu_done);
                list_add_tail(&req->ipi_list, list);

                /*
                 * If the list contains only our just-added request, raise
                 * the softirq. If there are already entries, the softirq
                 * has been raised but has not run yet.
                 */
                if (list->next == &req->ipi_list)
                        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        } else if (raise_blk_irq(ccpu, req))
                goto do_local;

        local_irq_restore(flags);
}

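/*
 * Illustrative sketch, not part of this file: the machinery above is what a
 * driver reaches when it completes a request from hard-IRQ context. The
 * handler and mydrv_fetch_completed() below are hypothetical; the entry
 * point blk_mq_complete_request() is real and, depending on queue flags and
 * CPU topology, may invoke ->complete() directly, via an IPI, or via the
 * BLOCK_SOFTIRQ path selected above:
 *
 *      static irqreturn_t mydrv_irq(int irq, void *dev_id)
 *      {
 *              struct request *rq = mydrv_fetch_completed(dev_id);
 *
 *              blk_mq_complete_request(rq);
 *              return IRQ_HANDLED;
 *      }
 */
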
static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;
        struct request_queue *q = rq->q;

        q->mq_ops->complete(rq);
}

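/*
 * Presumably (the caller sits in the lines hidden below): this is the
 * smp_call_function_single_async() callback installed in rq->csd so that
 * ->complete() runs on the CPU that submitted the request.
 */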

--- 3170 unchanged lines hidden ---

unsigned int blk_mq_rq_cpu(struct request *rq)
{
        return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

static int __init blk_mq_init(void)
{
        int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

        cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
                                  "block/softirq:dead", NULL,
                                  blk_softirq_cpu_dead);
        cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
                                blk_mq_hctx_notify_dead);
        cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
                                blk_mq_hctx_notify_online,
                                blk_mq_hctx_notify_offline);
        return 0;
}
subsys_initcall(blk_mq_init);