xref: /linux/kernel/irq/affinity.c (revision 69fb09f6ccdb2f070557fd1f4c56c4d646694c8e)
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

/*
 * irq_spread_init_one - Pick cpus_per_vec CPUs from nmsk for one vector
 * @irqmsk:	The resulting affinity mask for this vector
 * @nmsk:	The CPUs still available on the current node; CPUs picked
 *		here are removed from it
 * @cpus_per_vec:	The number of CPUs to assign to this vector
 */
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	while (cpus_per_vec > 0) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but be defensive if nmsk runs empty */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}

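/*
 * Worked example (hypothetical topology): with nmsk = { 0, 1, 2, 3 } and
 * CPUs 0/2 and 1/3 being hyperthread siblings, a call with cpus_per_vec = 2
 * picks CPU 0 first and then its sibling CPU 2, leaving irqmsk = { 0, 2 }
 * and nmsk = { 1, 3 } for the next vector.
 */
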
static cpumask_var_t *alloc_node_to_present_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}

static void free_node_to_present_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}

static void build_node_to_present_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_present_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}

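/*
 * For example, on a hypothetical two-node machine with present CPUs 0-3
 * on node 0 and CPUs 4-7 on node 1, this yields masks[0] = { 0-3 } and
 * masks[1] = { 4-7 }.
 */
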
static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

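/*
 * E.g. for the hypothetical two-node machine above, a mask covering CPUs
 * 2-5 intersects both nodes: the function sets nodes 0 and 1 in *nodemsk
 * and returns 2.
 */
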
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
	int n, nodes, cpus_per_vec, extra_vecs, curvec;
	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
	int last_affv = affv + affd->pre_vectors;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct cpumask *masks;
	cpumask_var_t nmsk, *node_to_present_cpumask;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto out;

	node_to_present_cpumask = alloc_node_to_present_cpumask();
	if (!node_to_present_cpumask) {
		/* Honor the documented contract: return NULL on allocation failure */
		kfree(masks);
		masks = NULL;
		goto out;
	}

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);

	/* Stabilize the cpumasks */
	get_online_cpus();
	build_node_to_present_cpumask(node_to_present_cpumask);
	nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
				     &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to the
	 * number of vectors, just spread the vectors across the nodes: each
	 * vector gets the present CPUs of one node.
	 */
	if (affv <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_copy(masks + curvec,
				     node_to_present_cpumask[n]);
			if (++curvec == last_affv)
				break;
		}
		goto done;
	}

	for_each_node_mask(n, nodemsk) {
		int ncpus, v, vecs_to_assign, vecs_per_node;

		/* Spread the vectors per node */
		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);

		/* Calculate the number of cpus per vector */
		ncpus = cpumask_weight(nmsk);
		vecs_to_assign = min(vecs_per_node, ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus % vecs_to_assign;

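		/*
		 * Illustration: ncpus = 5 and vecs_to_assign = 2 leave
		 * extra_vecs = 1, so the first vector gets 3 CPUs and the
		 * second one the remaining 2.
		 */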
		for (v = 0; curvec < last_affv && v < vecs_to_assign;
		     curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate for rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}
			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
		}

		if (curvec >= last_affv)
			break;
		--nodes;
	}

done:
	put_online_cpus();

	/* Fill out vectors at the end that don't need affinity */
	for (; curvec < nvecs; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);
	free_node_to_present_cpumask(node_to_present_cpumask);
out:
	free_cpumask_var(nmsk);
	return masks;
}

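/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * reserves one vector for a config interrupt before spreading the rest
 * might do something like:
 *
 *	const struct irq_affinity affd = { .pre_vectors = 1 };
 *	struct cpumask *masks = irq_create_affinity_masks(nvecs, &affd);
 *
 *	if (masks) {
 *		... program masks[i] into the matching MSI-X entries ...
 *		kfree(masks);
 *	}
 *
 * PCI drivers normally get this behaviour via
 * pci_alloc_irq_vectors_affinity() rather than calling this directly.
 */
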
/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 *
 * Returns the reserved (pre + post) vectors plus one vector per present
 * CPU, capped at @maxvec.
 */
int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
	int resv = affd->pre_vectors + affd->post_vectors;
	int vecs = maxvec - resv;
	int ret;

	get_online_cpus();
	ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
	put_online_cpus();
	return ret;
}
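
/*
 * Example: on a machine with 8 present CPUs, maxvec = 32 and
 * affd = { .pre_vectors = 1, .post_vectors = 0 } give
 * min(8, 32 - 1) + 1 = 9 vectors.
 */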