xref: /linux/tools/perf/util/env.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include "util/rwsem.h"
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "pmu.h"
#include "pmus.h"
#include "strbuf.h"
#include "trace/beauty/beauty.h"

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>

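/*
 * BPF program info and BTF nodes are kept in two rb-trees
 * (bpf_progs.infos and bpf_progs.btfs), keyed by program id and BTF id
 * respectively, and guarded by the bpf_progs.lock rwsem.  The
 * double-underscore variants below assume the caller already holds the
 * lock; the plain variants take it themselves.
 */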
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_bpf_prog_info(env, info_node);
	up_write(&env->bpf_progs.lock);

	return ret;
}

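/*
 * Insert variant for callers that already hold bpf_progs.lock for
 * writing.  A standard rb-tree descent: walk left/right on the program
 * id until a leaf slot is found; duplicates are refused so each
 * program id appears at most once in the tree.
 */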
bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			return false;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
	return true;
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

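/*
 * In-order walk over all recorded BPF program infos.  The read side of
 * bpf_progs.lock is held across the whole iteration, so the callback
 * must not insert or remove nodes: that would need the write lock and
 * deadlock here.
 */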
void perf_env__iterate_bpf_prog_info(struct perf_env *env,
				     void (*cb)(struct bpf_prog_info_node *node,
						void *data),
				     void *data)
{
	struct rb_node *first;

	down_read(&env->bpf_progs.lock);
	first = rb_first(&env->bpf_progs.infos);
	for (struct rb_node *node = first; node != NULL; node = rb_next(node))
		(*cb)(rb_entry(node, struct bpf_prog_info_node, rb_node), data);
	up_read(&env->bpf_progs.lock);
}

bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_btf(env, btf_node);
	up_write(&env->bpf_progs.lock);
	return ret;
}

bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			return false;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
	return true;
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *res;

	down_read(&env->bpf_progs.lock);
	res = __perf_env__find_btf(env, btf_id);
	up_read(&env->bpf_progs.lock);
	return res;
}

struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		zfree(&node->info_linear);
		bpf_metadata_free(node->metadata);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

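/*
 * Free the per-CPU scheduler-domain info recorded for schedstat-based
 * features.  The domain name (dname) is only populated for schedstat
 * version 17 and later, so it is only freed for those versions.
 */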
void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr)
{
	if (!cd_map)
		return;

	for (u32 i = 0; i < nr; i++) {
		if (!cd_map[i])
			continue;

		for (u32 j = 0; j < cd_map[i]->nr_domains; j++) {
			struct domain_info *d_info = cd_map[i]->domains[j];

			if (!d_info)
				continue;

			if (schedstat_version >= 17)
				zfree(&d_info->dname);

			zfree(&d_info->cpumask);
			zfree(&d_info->cpulist);
			zfree(&d_info);
		}
		zfree(&cd_map[i]->domains);
		zfree(&cd_map[i]);
	}
	zfree(&cd_map);
}

void perf_env__exit(struct perf_env *env)
{
	int i, j;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	for (i = 0; i < env->nr_cpu_pmu_caps; i++)
		zfree(&env->cpu_pmu_caps[i]);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
			zfree(&env->pmu_caps[i].caps[j]);
		zfree(&env->pmu_caps[i].caps);
		zfree(&env->pmu_caps[i].pmu_name);
	}
	zfree(&env->pmu_caps);
	free_cpu_domain_info(env->cpu_domain, env->schedstat_version, env->nr_cpus_avail);
}

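/*
 * kernel_is_64_bit is a tri-state: -1 means "not determined yet" and
 * is resolved lazily from the architecture string on first use in
 * perf_env__kernel_is_64_bit().
 */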
void perf_env__init(struct perf_env *env)
{
	memset(env, 0, sizeof(*env));
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}

static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}

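/*
 * Only the argv pointer array is duplicated here, not the strings it
 * points to: option parsing permutes the array, while the strings
 * themselves are expected to stay put for the lifetime of the process.
 */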
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int idx, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (idx = 0; idx < nr_cpus; ++idx) {
		struct perf_cpu cpu = { .cpu = idx };
		int core_id   = cpu__get_core_id(cpu);
		int socket_id = cpu__get_socket_id(cpu);
		int die_id    = cpu__get_die_id(cpu);

		env->cpu[idx].core_id	= core_id >= 0 ? core_id : -1;
		env->cpu[idx].socket_id	= socket_id >= 0 ? socket_id : -1;
		env->cpu[idx].die_id	= die_id >= 0 ? die_id : -1;
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

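/*
 * pmu_mappings is a packed sequence of nr_pmu_mappings entries of the
 * form "<type>:<name>", each terminated by a NUL byte, e.g. (values
 * illustrative) "4:cpu\0" "8:tracepoint\0".
 */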
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmus__scan(pmu)))
		pmu_num++;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmus__scan(pmu))) {
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	struct perf_cpu cpu = {-1};
	int err = get_cpuid(cpuid, sizeof(cpuid), cpu);

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

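/*
 * Render one PMU's capabilities as an array of "name=value" strings.
 * Branch-stack related capabilities are additionally parsed into the
 * dedicated out-parameters while walking the list.
 */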
static int __perf_env__read_core_pmu_caps(const struct perf_pmu *pmu,
					  int *nr_caps, char ***caps,
					  unsigned int *max_branches,
					  unsigned int *br_cntr_nr,
					  unsigned int *br_cntr_width)
{
	struct perf_pmu_caps *pcaps = NULL;
	char *ptr, **tmp;
	int ret = 0;

	*nr_caps = 0;
	*caps = NULL;

	if (!pmu->nr_caps)
		return 0;

	*caps = calloc(pmu->nr_caps, sizeof(char *));
	if (!*caps)
		return -ENOMEM;

	tmp = *caps;
	list_for_each_entry(pcaps, &pmu->caps, list) {
		if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
			ret = -ENOMEM;
			goto error;
		}

		*tmp++ = ptr;

		if (!strcmp(pcaps->name, "branches"))
			*max_branches = atoi(pcaps->value);
		else if (!strcmp(pcaps->name, "branch_counter_nr"))
			*br_cntr_nr = atoi(pcaps->value);
		else if (!strcmp(pcaps->name, "branch_counter_width"))
			*br_cntr_width = atoi(pcaps->value);
	}
	*nr_caps = pmu->nr_caps;
	return 0;
error:
	while (tmp-- != *caps)
		zfree(tmp);
	zfree(caps);
	*nr_caps = 0;
	return ret;
}

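/*
 * On a system with a single core PMU the capabilities land in the
 * legacy env->cpu_pmu_caps fields; with multiple core PMUs (typically
 * hybrid systems) each core PMU gets its own slot in the
 * env->pmu_caps array instead.
 */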
int perf_env__read_core_pmu_caps(struct perf_env *env)
{
	struct pmu_caps *pmu_caps;
	struct perf_pmu *pmu = NULL;
	int nr_pmu, i = 0, j;
	int ret;

	nr_pmu = perf_pmus__num_core_pmus();

	if (!nr_pmu)
		return -ENODEV;

	if (nr_pmu == 1) {
		pmu = perf_pmus__find_core_pmu();
		if (!pmu)
			return -ENODEV;
		ret = perf_pmu__caps_parse(pmu);
		if (ret < 0)
			return ret;
		return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
						      &env->cpu_pmu_caps,
						      &env->max_branches,
						      &env->br_cntr_nr,
						      &env->br_cntr_width);
	}

	pmu_caps = calloc(nr_pmu, sizeof(*pmu_caps));
	if (!pmu_caps)
		return -ENOMEM;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (perf_pmu__caps_parse(pmu) <= 0)
			continue;
		ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
						     &pmu_caps[i].caps,
						     &pmu_caps[i].max_branches,
						     &pmu_caps[i].br_cntr_nr,
						     &pmu_caps[i].br_cntr_width);
		if (ret)
			goto error;

		pmu_caps[i].pmu_name = strdup(pmu->name);
		if (!pmu_caps[i].pmu_name) {
			ret = -ENOMEM;
			goto error;
		}
		i++;
	}

	env->nr_pmus_with_caps = nr_pmu;
	env->pmu_caps = pmu_caps;

	return 0;
error:
	for (i = 0; i < nr_pmu; i++) {
		for (j = 0; j < pmu_caps[i].nr_caps; j++)
			zfree(&pmu_caps[i].caps[j]);
		zfree(&pmu_caps[i].caps);
		zfree(&pmu_caps[i].pmu_name);
	}
	zfree(&pmu_caps);
	return ret;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";
	if (!strncmp(arch, "loongarch", 9))
		return "loongarch";

	return arch;
}
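/* Examples: "i686" and "x86_64" normalize to "x86", "ppc64le" to "powerpc", "aarch64" to "arm64". */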

const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

#if defined(HAVE_LIBTRACEEVENT)
#include "trace/beauty/arch_errno_names.c"
#endif

const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused)
{
#if defined(HAVE_LIBTRACEEVENT)
	if (env->arch_strerrno == NULL)
		env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env));

	return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function";
#else
	return "!HAVE_LIBTRACEEVENT";
#endif
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}

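/*
 * Map a CPU to its NUMA node.  On first use, build a reverse map from
 * CPU number to node index, sized by the largest CPU seen in any
 * node's cpu map; CPUs absent from every node stay at -1.
 */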
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, (int)perf_cpu_map__max(nn->map).cpu);
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			struct perf_cpu tmp;
			int j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(tmp, j, nn->map)
				env->numa_map[tmp.cpu] = i;
		}
	}

	return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}

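/*
 * Scan the packed "<type>:<name>\0" entries in env->pmu_mappings (see
 * perf_env__read_pmu_mappings() above) for an exact PMU name match.
 */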
bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name)
{
	char *pmu_mapping = env->pmu_mappings, *colon;

	for (int i = 0; i < env->nr_pmu_mappings; ++i) {
		if (strtoul(pmu_mapping, &colon, 0) == ULONG_MAX || *colon != ':')
			goto out_error;

		pmu_mapping = colon + 1;
		if (strcmp(pmu_mapping, pmu_name) == 0)
			return true;

		pmu_mapping += strlen(pmu_mapping) + 1;
	}
out_error:
	return false;
}

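/*
 * Look up a capability by name for the given PMU.  Capabilities are
 * stored as "name=value" strings, so the query is turned into a
 * "name=" prefix match; the returned pointer aliases env's storage
 * (the value part of the matching string) and must not be freed.
 */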
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap)
{
	char *cap_eq;
	int cap_size;
	char **ptr;
	int i, j;

	if (!pmu_name || !cap)
		return NULL;

	cap_size = strlen(cap);
	cap_eq = zalloc(cap_size + 2);
	if (!cap_eq)
		return NULL;

	memcpy(cap_eq, cap, cap_size);
	cap_eq[cap_size] = '=';

	if (!strcmp(pmu_name, "cpu")) {
		for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
			if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &env->cpu_pmu_caps[i][cap_size + 1];
			}
		}
		goto out;
	}

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
			continue;

		ptr = env->pmu_caps[i].caps;

		for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
			if (!strncmp(ptr[j], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &ptr[j][cap_size + 1];
			}
		}
	}

out:
	free(cap_eq);
	return NULL;
}

void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width)
{
	if (nr) {
		*nr = env->cpu_pmu_caps ? env->br_cntr_nr :
					  env->pmu_caps->br_cntr_nr;
	}

	if (width) {
		*width = env->cpu_pmu_caps ? env->br_cntr_width :
					     env->pmu_caps->br_cntr_width;
	}
}

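/*
 * The vendor checks below cache their result in a function-local
 * static, so the answer is computed from the first perf_env queried
 * and reused for every subsequent call, whichever env is passed.
 */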
bool perf_env__is_x86_amd_cpu(struct perf_env *env)
{
	static int is_amd; /* 0: Uninitialized, 1: Yes, -1: No */

	if (is_amd == 0)
		is_amd = env->cpuid && strstarts(env->cpuid, "AuthenticAMD") ? 1 : -1;

	return is_amd >= 1 ? true : false;
}

bool x86__is_amd_cpu(void)
{
	struct perf_env env = { .total_mem = 0, };
	bool is_amd;

	perf_env__cpuid(&env);
	is_amd = perf_env__is_x86_amd_cpu(&env);
	perf_env__exit(&env);

	return is_amd;
}

bool perf_env__is_x86_intel_cpu(struct perf_env *env)
{
	static int is_intel; /* 0: Uninitialized, 1: Yes, -1: No */

	if (is_intel == 0)
		is_intel = env->cpuid && strstarts(env->cpuid, "GenuineIntel") ? 1 : -1;

	return is_intel >= 1 ? true : false;
}

bool x86__is_intel_cpu(void)
{
	struct perf_env env = { .total_mem = 0, };
	bool is_intel;

	perf_env__cpuid(&env);
	is_intel = perf_env__is_x86_intel_cpu(&env);
	perf_env__exit(&env);

	return is_intel;
}
856