xref: /linux/block/blk-mq-cpumap.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

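/*
 * Spread @nr_cpus CPUs evenly across @nr_queues hardware queues by integer
 * scaling, so consecutive CPUs land on the same or the next queue. For
 * example, with 8 CPUs and 3 queues, CPUs 0-2 map to queue 0, CPUs 3-5 to
 * queue 1 and CPUs 6-7 to queue 2.
 */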
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu * nr_queues / nr_cpus;
}

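/*
 * Return the first CPU in @cpu's thread sibling mask, or @cpu itself if the
 * sibling mask is empty (cpumask_first() then returns >= nr_cpu_ids).
 */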
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

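/*
 * Build the CPU to hardware queue map in set->mq_map. Offline CPUs are
 * mapped to queue 0. If there are at least as many queues as online CPUs,
 * or no online CPUs share a core, the online CPUs are spread evenly across
 * the queues; otherwise thread siblings of the same core share a queue.
 */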
int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
	unsigned int *map = set->mq_map;
	unsigned int nr_queues = set->nr_hw_queues;
	const struct cpumask *online_mask = cpu_online_mask;
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return -ENOMEM;

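	/*
	 * Count the online CPUs and the number of "unique" CPUs, i.e. those
	 * whose first thread sibling has not been seen yet (one per core).
	 */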
	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
	for_each_cpu(i, online_mask) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}

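	/*
	 * Assign a queue to each possible CPU. The running 'queue' counter
	 * only advances when an online CPU gets a fresh assignment rather
	 * than reusing a sibling's entry.
	 */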
	queue = 0;
	for_each_possible_cpu(i) {
		if (!cpumask_test_cpu(i, online_mask)) {
			map[i] = 0;
			continue;
		}

		/*
		 * Easy case - we have at least as many hardware queues as
		 * online CPUs, or there are no thread siblings to take
		 * into account: spread the CPUs evenly across the queues.
		 */
		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
			queue++;
			continue;
		}

		/*
		 * Fewer than nr_cpus queues, and we have multiple threads
		 * per core: map sibling threads to the same queue.
		 */
		first_sibling = get_first_sibling(i);
		if (first_sibling == i) {
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
							queue);
			queue++;
		} else
			map[i] = map[first_sibling];
	}

	free_cpumask_var(cpus);
	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/*
 * Return the local memory node of the first CPU mapped to hardware queue
 * @index, or NUMA_NO_NODE if no CPU maps to it. We have no quick way of
 * doing reverse lookups. This is only used at queue init time, so runtime
 * isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
}