// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <internal/cpumap.h>

static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/*
 * The NUMA node X, as read from /sys/devices/system/node/nodeX, indexed by
 * CPU number.
 */
static int *cpunode_map;

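/*
 * Test whether bit i is set in data, handling both the 32-bit and 64-bit
 * mask encodings.
 */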
bool perf_record_cpu_map_data__test_bit(int i,
					const struct perf_record_cpu_map_data *data)
{
	int bit_word32 = i / 32;
	__u32 bit_mask32 = 1U << (i & 31);
	int bit_word64 = i / 64;
	__u64 bit_mask64 = ((__u64)1) << (i & 63);

	return (data->mask32_data.long_size == 4)
		? (bit_word32 < data->mask32_data.nr) &&
		(data->mask32_data.mask[bit_word32] & bit_mask32) != 0
		: (bit_word64 < data->mask64_data.nr) &&
		(data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
}

/* Read ith mask value from data into the given 64-bit sized bitmap */
static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
						    int i, unsigned long *bitmap)
{
#if __SIZEOF_LONG__ == 8
	if (data->mask32_data.long_size == 4)
		bitmap[0] = data->mask32_data.mask[i];
	else
		bitmap[0] = data->mask64_data.mask[i];
#else
	if (data->mask32_data.long_size == 4) {
		bitmap[0] = data->mask32_data.mask[i];
		bitmap[1] = 0;
	} else {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
		bitmap[1] = (unsigned long)data->mask64_data.mask[i];
#else
		bitmap[0] = (unsigned long)data->mask64_data.mask[i];
		bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
#endif
	}
#endif
}

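/* Build a perf_cpu_map from the explicit CPU list in a cpu_map event. */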
static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(data->cpus_data.nr);
	if (!map)
		return NULL;

	for (unsigned int i = 0; i < data->cpus_data.nr; i++) {
		/*
		 * Special treatment for -1, which is not a real CPU number:
		 * map[i] must be initialized with (int) -1, otherwise the
		 * u16 value would be read back as 65535.
		 */
		if (data->cpus_data.cpu[i] == (u16) -1) {
			RC_CHK_ACCESS(map)->map[i].cpu = -1;
		} else if (data->cpus_data.cpu[i] < INT16_MAX) {
			RC_CHK_ACCESS(map)->map[i].cpu = (int16_t) data->cpus_data.cpu[i];
		} else {
			pr_err("Invalid cpumap entry %u\n", data->cpus_data.cpu[i]);
			perf_cpu_map__put(map);
			return NULL;
		}
	}

	return map;
}

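/* Build a perf_cpu_map from the bitmask encoding of a cpu_map event. */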
static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
	DECLARE_BITMAP(local_copy, 64);
	int weight = 0, mask_nr = data->mask32_data.nr;
	struct perf_cpu_map *map;

	for (int i = 0; i < mask_nr; i++) {
		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		weight += bitmap_weight(local_copy, 64);
	}

	map = perf_cpu_map__empty_new(weight);
	if (!map)
		return NULL;

	for (int i = 0, j = 0; i < mask_nr; i++) {
		int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
		int cpu;

		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		for_each_set_bit(cpu, local_copy, 64) {
			if (cpu + cpus_per_i < INT16_MAX) {
				RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
			} else {
				pr_err("Invalid cpumap entry %d\n", cpu + cpus_per_i);
				perf_cpu_map__put(map);
				return NULL;
			}
		}
	}
	return map;
}

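/*
 * Build a perf_cpu_map from the [start_cpu, end_cpu] range encoding of a
 * cpu_map event, with an optional leading "any CPU" (-1) entry.
 */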
static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;
	unsigned int i = 0;

	map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
				data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
	if (!map)
		return NULL;

	if (data->range_cpu_data.any_cpu)
		RC_CHK_ACCESS(map)->map[i++].cpu = -1;

	for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
	     i++, cpu++) {
		if (cpu < INT16_MAX) {
			RC_CHK_ACCESS(map)->map[i].cpu = cpu;
		} else {
			pr_err("Invalid cpumap entry %d\n", cpu);
			perf_cpu_map__put(map);
			return NULL;
		}
	}

	return map;
}

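/* Decode a cpu_map event into a perf_cpu_map, dispatching on its encoding. */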
struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		return cpu_map__from_entries(data);
	case PERF_CPU_MAP__MASK:
		return cpu_map__from_mask(data);
	case PERF_CPU_MAP__RANGE_CPUS:
		return cpu_map__from_range(data);
	default:
		pr_err("cpu_map__new_data unknown type %d\n", data->type);
		return NULL;
	}
}

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

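/* Allocate a perf_cpu_map of nr entries, each initialized to -1 ("no CPU"). */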
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr);

	if (cpus != NULL) {
		for (int i = 0; i < nr; i++)
			RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
	}

	return cpus;
}

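/* Allocate a cpu_aggr_map of nr entries, each initialized to the empty id. */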
struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();
	}

	return cpus;
}

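/* Read an integer topology attribute of cpu from sysfs, e.g. "core_id". */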
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu__get_socket_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}

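/*
 * Compare two aggregate CPU ids field by field, from the coarsest (node) to
 * the finest (thread) level, for sorting.
 */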
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->cluster != b->cluster)
		return a->cluster - b->cluster;
	else if (a->cache_lvl != b->cache_lvl)
		return a->cache_lvl - b->cache_lvl;
	else if (a->cache != b->cache)
		return a->cache - b->cache;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread_idx - b->thread_idx;
}

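/*
 * Build an aggregate map for cpus: one entry per distinct id returned by
 * get_id(), trimmed to the final size and optionally sorted.
 */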
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data, bool needs_sort)
{
	int idx;
	struct perf_cpu cpu;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}
	/* Trim. */
	if (c->nr != perf_cpu_map__nr(cpus)) {
		struct cpu_aggr_map *trimmed_c =
			realloc(c,
				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);

		if (trimmed_c)
			c = trimmed_c;
	}

	/* Ensure the ids are processed in increasing order. */
	if (needs_sort)
		qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}

int cpu__get_die_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die < 0)
		die = 0;

	/*
	 * die_id is relative to the socket, so start with the socket ID and
	 * then add the die to make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}

int cpu__get_cluster_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
{
	int cluster = cpu__get_cluster_id(cpu);
	struct aggr_cpu_id id;

	/* There is no cluster_id on legacy systems. */
	if (cluster < 0)
		cluster = 0;

	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cluster = cluster;
	return id;
}

int cpu__get_core_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__cluster returns a struct with socket, die and cluster set. */
	id = aggr_cpu_id__cluster(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to the socket and die, but a global id is
	 * needed, so combine the result from aggr_cpu_id__cluster with the
	 * core id.
	 */
	id.core = core;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;

	/* aggr_cpu_id__core returns a struct with socket, die and core set. */
	id = aggr_cpu_id__core(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cpu = cpu;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}

struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* Everything aggregates to CPU 0. */
	cpu.cpu = 0;
	id.cpu = cpu;
	return id;
}

/* Set up simple routines to easily access node numbers given a CPU number. */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find the highest node number */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert the highest 0-based index into a count */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine the highest possible CPU in the system, for sparse allocation. */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int max, ret = -1;

	/* set up the defaults */
	max_cpu_num.cpu = 4096;
	max_present_cpu_num.cpu = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path exceeds PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max);
	if (ret)
		goto out;

	max_cpu_num.cpu = max;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path exceeds PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max);

	if (!ret && max > INT16_MAX) {
		pr_err("Read out-of-bounds max cpus of %d\n", max);
		ret = -1;
	}
	if (!ret)
		max_present_cpu_num.cpu = (int16_t)max;
out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}

/* Determine the highest possible node in the system, for sparse allocation. */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up the default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path exceeds PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

struct perf_cpu cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num.cpu))
		set_max_cpu_num();

	return max_cpu_num;
}

struct perf_cpu cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num.cpu))
		set_max_cpu_num();

	return max_present_cpu_num;
}

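/* Look up the NUMA node of cpu in the map built by cpu__setup_cpunode_map(). */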
int cpu__get_node(struct perf_cpu cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu.cpu];
}

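/* Allocate cpunode_map and initialize every entry to -1 (unknown node). */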
static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num.cpu; i++)
		cpunode_map[i] = -1;

	return 0;
}

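/*
 * Populate cpunode_map by walking the /sys/devices/system/node/nodeX/cpuY
 * symlinks.
 */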
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path exceeds PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk the tree and set up the map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path exceeds PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

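/* Format map into buf as a human-readable CPU list, e.g. "0-7,16-23". */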
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
		struct perf_cpu cpu = { .cpu = INT16_MAX };
		bool last = i == perf_cpu_map__nr(map);

		if (!last)
			cpu = perf_cpu_map__cpu(map, i);

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, i).cpu);
			}
		} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

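/* Convert a value in [0, 15] to its lowercase hex digit. */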
670 
671 static char hex_char(unsigned char val)
672 {
673 	if (val < 10)
674 		return val + '0';
675 	if (val < 16)
676 		return val - 10 + 'a';
677 	return '?';
678 }
679 
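/*
 * Format map into buf as a hex CPU mask, one digit per four CPUs, with a
 * comma every 32 CPUs.
 */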
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int idx;
	char *ptr = buf;
	unsigned char *bitmap;
	struct perf_cpu c, last_cpu = perf_cpu_map__max(map);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu.cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	perf_cpu_map__for_each_cpu(c, idx, map)
		bitmap[c.cpu / 8] |= 1 << (c.cpu % 8);

	for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static struct perf_cpu_map *online;

	if (!online)
		online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */

	return perf_cpu_map__get(online);
}

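/* Return true if every field of the two aggregate CPU ids matches. */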
bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread_idx == b->thread_idx &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->cluster == b->cluster &&
		a->cache_lvl == b->cache_lvl &&
		a->cache == b->cache &&
		a->core == b->core &&
		a->cpu.cpu == b->cpu.cpu;
}

bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread_idx == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->cluster == -1 &&
		a->cache_lvl == -1 &&
		a->cache == -1 &&
		a->core == -1 &&
		a->cpu.cpu == -1;
}

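/* Return an aggregate CPU id with every field set to -1 ("not set"). */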
struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread_idx = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.cluster = -1,
		.cache_lvl = -1,
		.cache = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return ret;
}
769