xref: /linux/block/blk-mq-cpumap.c (revision ba6ec09911b805778a2fed6d626bfe77b011a717)
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
#include <linux/device/bus.h>

#include "blk.h"
#include "blk-mq.h"

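/*
 * blk_mq_map_queues - Create a CPU to hardware queue mapping
 * @qmap: CPU to hardware queue map.
 *
 * Spread the possible CPUs evenly across the map's hardware queues using
 * group_cpus_evenly(). If no grouping can be allocated, fall back to
 * mapping every possible CPU to the first queue of the map
 * (@qmap->queue_offset).
 */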
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu;

	masks = group_cpus_evenly(qmap->nr_queues);
	if (!masks) {
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
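
/*
 * Illustrative sketch (not part of the original file): a driver without IRQ
 * affinity information can call blk_mq_map_queues() directly from its
 * struct blk_mq_ops ->map_queues() callback. foo_map_queues() is a
 * hypothetical name used only for the example:
 *
 *	static void foo_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 */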

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: Hardware queue index.
 *
 * There is no quick way of doing a reverse lookup, but this is only used at
 * queue init time, so the linear scan's runtime cost is acceptable.
 *
 * Return: the NUMA node of the first possible CPU mapped to hardware queue
 * @index, or NUMA_NO_NODE if no CPU is mapped to it.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
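
/*
 * Illustrative sketch (not part of the original file): the lookup is meant
 * for queue init time, where per-queue data can be placed on the node that
 * backs the CPUs mapped to that queue. Names below (set, hctx_idx, hctx,
 * node) are hypothetical locals:
 *
 *	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
 *	hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
 */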

/**
 * blk_mq_map_hw_queues - Create CPU to hardware queue mapping
 * @qmap:	CPU to hardware queue map
 * @dev:	The device to map queues for
 * @offset:	Queue offset to use for the device
 *
 * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
 * irq_get_affinity callback will be used to retrieve the affinity mask for
 * each hardware queue. If the bus does not provide the callback, or no
 * affinity mask is available for a queue, the mapping falls back to
 * blk_mq_map_queues().
 */
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
			  struct device *dev, unsigned int offset)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	if (!dev->bus->irq_get_affinity)
		goto fallback;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		mask = dev->bus->irq_get_affinity(dev, queue + offset);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}

	return;

fallback:
	blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
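
/*
 * Illustrative sketch (not part of the original file): a driver whose bus
 * implements the irq_get_affinity callback (e.g. PCI) would typically call
 * blk_mq_map_hw_queues() from its struct blk_mq_ops ->map_queues() callback.
 * foo_map_queues(), struct foo_dev and foo->pdev are hypothetical names:
 *
 *	static void foo_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct foo_dev *foo = set->driver_data;
 *
 *		blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
 *				     &foo->pdev->dev, 0);
 *	}
 */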