// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
#include <linux/device/bus.h>
#include <linux/sched/isolation.h>

#include "blk.h"
#include "blk-mq.h"

/*
 * Return the number of CPUs in @mask, capped at @max_queues.
 * A @max_queues of 0 means "no cap".
 */
static unsigned int blk_mq_num_queues(const struct cpumask *mask,
				      unsigned int max_queues)
{
	unsigned int num;

	num = cpumask_weight(mask);
	return min_not_zero(num, max_queues);
}

/**
 * blk_mq_num_possible_queues - Calc nr of queues for multiqueue devices
 * @max_queues: The maximum number of queues the hardware/driver
 *		supports. If max_queues is 0, the argument is
 *		ignored.
 *
 * Calculates the number of queues to be used for a multiqueue
 * device based on the number of possible CPUs.
 *
 * Return: the number of possible CPUs, capped at @max_queues when
 * @max_queues is not 0.
 */
unsigned int blk_mq_num_possible_queues(unsigned int max_queues)
{
	return blk_mq_num_queues(cpu_possible_mask, max_queues);
}
EXPORT_SYMBOL_GPL(blk_mq_num_possible_queues);

/**
 * blk_mq_num_online_queues - Calc nr of queues for multiqueue devices
 * @max_queues: The maximum number of queues the hardware/driver
 *		supports. If max_queues is 0, the argument is
 *		ignored.
 *
 * Calculates the number of queues to be used for a multiqueue
 * device based on the number of online CPUs.
 *
 * Return: the number of online CPUs, capped at @max_queues when
 * @max_queues is not 0.
 */
unsigned int blk_mq_num_online_queues(unsigned int max_queues)
{
	return blk_mq_num_queues(cpu_online_mask, max_queues);
}
EXPORT_SYMBOL_GPL(blk_mq_num_online_queues);
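
/*
 * Usage sketch (illustrative only, not part of this file): a driver would
 * typically use one of the helpers above to size its tag set, e.g.
 *
 *	// "set" is the driver's struct blk_mq_tag_set, "hw_queue_limit" a
 *	// hypothetical limit reported by the device (0 if it has none)
 *	set->nr_hw_queues = blk_mq_num_possible_queues(hw_queue_limit);
 *
 * blk_mq_num_online_queues() is the same calculation against the CPUs that
 * are currently online rather than all possible ones.
 */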

/*
 * Default mapping: spread all possible CPUs evenly across the hardware
 * queues using group_cpus_evenly().
 */
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu, nr_masks;

	masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
	if (!masks) {
		/* Allocation failure: map every CPU to the first queue. */
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue % nr_masks])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
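
/*
 * Usage sketch (illustrative, not part of this file): a driver with no
 * device-specific affinity information would call this from its
 * ->map_queues() callback, e.g.
 *
 *	// "hypothetical_map_queues" stands in for the driver's own callback
 *	static void hypothetical_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 */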

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 *
 * Return: the NUMA node of the first possible CPU mapped to hardware queue
 * @index, or NUMA_NO_NODE if no CPU is mapped to it.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
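
/*
 * Usage sketch (illustrative): the returned node is typically fed to a
 * NUMA-aware allocation when setting up per-queue data, along the lines of
 *
 *	// "data" and "hctx_idx" are placeholders for the caller's own
 *	// per-queue structure and hardware queue index
 *	int node = blk_mq_hw_queue_to_node(qmap, hctx_idx);
 *	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
 */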

/**
 * blk_mq_map_hw_queues - Create CPU to hardware queue mapping
 * @qmap: CPU to hardware queue map
 * @dev: The device to map queues for
 * @offset: Queue offset to use for the device
 *
 * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
 * irq_get_affinity callback will be used to retrieve the affinity. If the
 * bus does not provide affinity information, fall back to the generic
 * blk_mq_map_queues() spread.
 */
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
			  struct device *dev, unsigned int offset)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	if (!dev->bus->irq_get_affinity)
		goto fallback;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		mask = dev->bus->irq_get_affinity(dev, queue + offset);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}

	return;

fallback:
	blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
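
/*
 * Usage sketch (illustrative, not from this file): a PCI driver whose bus
 * provides an irq_get_affinity() callback could let the IRQ spreading drive
 * the queue mapping from its ->map_queues() callback, e.g.
 *
 *	// "hypothetical_pci_map_queues", "hypothetical_ctrl" and the offset
 *	// of 0 are placeholders; a non-zero offset would skip IRQ vectors
 *	// that are not I/O queue vectors
 *	static void hypothetical_pci_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct hypothetical_ctrl *ctrl = set->driver_data;
 *
 *		blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
 *				     &ctrl->pdev->dev, 0);
 *	}
 */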