/* linux/tools/lib/perf/cpumap.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad) */
// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"

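/*
 * Minimal usage sketch (illustrative only, not part of the original file),
 * assuming the perf_cpu_map__for_each_cpu() iterator declared in
 * <perf/cpumap.h>:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3,6");
 *	struct perf_cpu cpu;
 *	int idx;
 *
 *	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
 *		printf("cpu%d\n", cpu.cpu);
 *	perf_cpu_map__put(cpus);
 */
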
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

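/*
 * Allocate a map with room for nr_cpus entries: the header and the flexible
 * map[] payload share a single allocation and the reference count starts at
 * one. Returns NULL if the allocation fails.
 */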
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	struct perf_cpu_map *result;

	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

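/*
 * Dummy map with a single "any CPU" (-1) entry, used when events are not
 * bound to specific CPUs.
 */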
struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

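/* Take a new reference on the map; paired with perf_cpu_map__put(). */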
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

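/* Drop a reference; the map is freed when the last reference is put. */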
void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

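/*
 * Fallback when the sysfs online list cannot be read: size the map via
 * sysconf() and assume CPUs are numbered 0..nr_cpus-1 without holes.
 */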
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

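/* Build the map from the kernel's online CPU list in sysfs. */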
static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (onlnf) {
		cpus = perf_cpu_map__read(onlnf);
		fclose(onlnf);
	}
	return cpus;
}

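/* Map of online CPUs: prefer the sysfs list, fall back to sysconf(). */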
struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

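/*
 * Build a map from a scratch array of CPUs: copy, sort and de-duplicate the
 * entries, then trim nr down to the number of unique CPUs.
 */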
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

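/*
 * Parse a CPU list such as "0-2,7\n" from a file, the format used by
 * /sys/devices/system/cpu/online, expanding ranges and growing the scratch
 * array as needed.
 */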
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

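/*
 * Parse a user supplied CPU list string such as "0-3,6". A NULL list means
 * the online CPUs; an empty string yields the "any CPU" dummy map (see the
 * comment about empty cpumaps below).
 */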
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPUs
	 * (e.g. because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else
		cpus = perf_cpu_map__new_any_cpu();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

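/* A NULL map is treated like the one-entry "any CPU" dummy map, hence nr is 1. */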
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

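/*
 * Return the index of 'cpu' in the map, or -1 if it is absent. The binary
 * search relies on the map being sorted, which cpu_map__trim_new() guarantees.
 */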
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// The map is kept sorted: cpu_map__trim_new() qsort()s it and cpu_map__new_sysconf() fills it in ascending order.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * orig either gets freed and replaced with a new map, or is reused
 * with no reference count change (similar to "realloc"); other has its
 * reference count increased when it is returned directly.
 */
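/*
 * Worked example (assumed inputs, illustrative only): merging {0, 2} with
 * {1, 2} builds a new map {0, 1, 2}, drops the reference held on the first
 * map and leaves the second map's refcount unchanged.
 */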

struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}

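/*
 * Intersection of two maps, e.g. {0, 1, 2} intersected with {1, 3} gives {1}.
 * Returns a reference to the result, or NULL when the intersection is empty
 * (or on allocation failure).
 */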
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}
493