// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"
#include <api/fs/fs.h>

#define MAX_NR_CPUS 4096

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus;
	struct perf_cpu_map *result;

	if (nr_cpus == 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

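/*
 * Usage sketch (illustrative, not part of this file): the "any CPU" map is
 * a single-entry map holding the sentinel -1, the value perf_event_open()
 * takes to mean "count on any CPU" for a given thread.
 *
 *	struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
 *
 *	if (any) {
 *		assert(perf_cpu_map__cpu(any, 0).cpu == -1);
 *		perf_cpu_map__put(any);
 *	}
 */
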
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

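/*
 * Refcounting sketch (illustrative, not part of this file): every
 * perf_cpu_map__get() must be balanced by a perf_cpu_map__put(); the map
 * is only freed when the last reference is dropped.
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new("0-3");
 *	struct perf_cpu_map *ref = perf_cpu_map__get(map);	// refcnt: 2
 *
 *	perf_cpu_map__put(map);					// refcnt: 1
 *	perf_cpu_map__put(ref);					// freed here
 */
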
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	char *buf = NULL;
	size_t buf_len;

	if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
		cpus = perf_cpu_map__new(buf);
		free(buf);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}

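/*
 * Usage sketch (illustrative, not part of this file): callers normally do
 * not care whether sysfs or sysconf() provided the answer, only that holes
 * left by offlined CPUs are reflected when sysfs is available.
 *
 *	struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
 *
 *	if (online) {
 *		fprintf(stderr, "%d online CPUs\n", perf_cpu_map__nr(online));
 *		perf_cpu_map__put(online);
 *	}
 */
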
static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

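/*
 * Behavior sketch (illustrative): cpu_map__trim_new() both sorts and
 * de-duplicates its input, so an unordered buffer such as {3, 1, 3, 0}
 * yields the map {0, 1, 3} with nr trimmed from 4 to 3. Every constructor
 * that goes through it therefore hands out sorted, duplicate-free maps,
 * which is what makes the binary search in perf_cpu_map__idx() valid.
 */
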
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT16_MAX
		    || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT16_MAX || (*p != '\0' && *p != ',' && *p != '\n'))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int16_t)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += max(end_cpu - start_cpu + 1, 16UL);
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0) {
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	} else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else {
		cpus = perf_cpu_map__new_any_cpu();
	}
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

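/*
 * Parsing sketch (illustrative, not part of this file): ranges and
 * comma-separated entries may be mixed, a duplicate CPU makes the whole
 * list invalid (NULL return), a NULL argument falls back to the online
 * CPU map, and an empty string yields the "any CPU" dummy map.
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new("0-2,4");
 *
 *	// map now holds {0, 1, 2, 4}; perf_cpu_map__nr(map) == 4.
 *	perf_cpu_map__put(map);
 */
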
struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;

	return cpus;
}

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

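/*
 * Iteration sketch (illustrative, not part of this file), pairing
 * perf_cpu_map__nr() with perf_cpu_map__cpu() via the
 * perf_cpu_map__for_each_cpu() helper from <perf/cpumap.h>; out-of-range
 * indices and a NULL map both produce the -1 sentinel, and a NULL map
 * reports nr == 1 so that loops still visit the "any CPU" slot.
 *
 *	struct perf_cpu cpu;
 *	int idx;
 *
 *	perf_cpu_map__for_each_cpu(cpu, idx, map)
 *		printf("idx %d -> cpu %d\n", idx, cpu.cpu);
 */
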
bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	if (!map)
		return true;

	return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
{
	return map == NULL;
}

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

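/*
 * Lookup sketch (illustrative, not part of this file): because maps are
 * kept sorted by construction, perf_cpu_map__idx() can binary-search in
 * O(log n), and perf_cpu_map__has() is a thin wrapper over it.
 *
 *	struct perf_cpu key = { .cpu = 2 };
 *
 *	if (perf_cpu_map__has(map, key))
 *		printf("cpu 2 is at index %d\n", perf_cpu_map__idx(map, key));
 */
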
bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
{
	struct perf_cpu cpu, result = {
		.cpu = -1
	};
	int idx;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
		result = cpu;
		break;
	}
	return result;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// cpu_map__trim_new() qsort()s it, cpu_map__new_sysconf() fills it in ascending order.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

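/*
 * Behavior sketch (illustrative): with a = {0, 1, 2, 3} and b = {1, 3},
 * perf_cpu_map__is_subset(a, b) is true while perf_cpu_map__is_subset(b, a)
 * is false; a NULL 'b' is trivially a subset of anything. The single pass
 * over 'a' works because both maps are sorted.
 */
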
/*
 * Merge two cpumaps.
 *
 * If 'other' is a subset of '*orig', '*orig' keeps itself with no reference
 * count change (similar to "realloc").
 *
 * If '*orig' is a subset of 'other', '*orig' reuses 'other' with its
 * reference count increased.
 *
 * Otherwise, '*orig' gets freed and replaced with a new map.
 */
int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(*orig, other))
		return 0;
	if (perf_cpu_map__is_subset(other, *orig)) {
		perf_cpu_map__put(*orig);
		*orig = perf_cpu_map__get(other);
		return 0;
	}

	tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return -ENOMEM;

	/* Standard merge algorithm from Wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(*orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(*orig);
	*orig = merged;
	return 0;
}

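/*
 * Merge sketch (illustrative, not part of this file): merging {0, 1} into
 * {2, 3} replaces '*orig' with the union {0, 1, 2, 3}; a zero return means
 * success and -ENOMEM the only failure.
 *
 *	struct perf_cpu_map *orig = perf_cpu_map__new("2-3");
 *	struct perf_cpu_map *other = perf_cpu_map__new("0-1");
 *
 *	if (!perf_cpu_map__merge(&orig, other))
 *		assert(perf_cpu_map__nr(orig) == 4);	// {0, 1, 2, 3}
 *	perf_cpu_map__put(other);
 *	perf_cpu_map__put(orig);
 */
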
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}
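
/*
 * Intersect sketch (illustrative, not part of this file): unlike merge,
 * both inputs are left untouched and the caller owns the returned
 * reference; a NULL return means the intersection is empty (or the
 * temporary allocation failed).
 *
 *	struct perf_cpu_map *a = perf_cpu_map__new("0-3");
 *	struct perf_cpu_map *b = perf_cpu_map__new("2-5");
 *	struct perf_cpu_map *both = perf_cpu_map__intersect(a, b);
 *
 *	// both holds {2, 3}
 *	perf_cpu_map__put(a);
 *	perf_cpu_map__put(b);
 *	perf_cpu_map__put(both);
 */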