// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include "util/rwsem.h"
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "pmu.h"
#include "pmus.h"
#include "strbuf.h"
#include "trace/beauty/beauty.h"

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>

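/*
 * BPF program info nodes are kept in an rb-tree keyed by program id.
 * Helpers prefixed with "__" expect env->bpf_progs.lock to be held by
 * the caller; the unprefixed wrappers take it themselves.
 */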
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_bpf_prog_info(env, info_node);
	up_write(&env->bpf_progs.lock);

	return ret;
}

bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			return false;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
	return true;
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

void perf_env__iterate_bpf_prog_info(struct perf_env *env,
				     void (*cb)(struct bpf_prog_info_node *node,
						void *data),
				     void *data)
{
	struct rb_node *first;

	down_read(&env->bpf_progs.lock);
	first = rb_first(&env->bpf_progs.infos);
	for (struct rb_node *node = first; node != NULL; node = rb_next(node))
		(*cb)(rb_entry(node, struct bpf_prog_info_node, rb_node), data);
	up_read(&env->bpf_progs.lock);
}

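/*
 * BTF objects are kept in a second rb-tree, keyed by BTF id and protected
 * by the same env->bpf_progs.lock.
 */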
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_btf(env, btf_node);
	up_write(&env->bpf_progs.lock);
	return ret;
}

bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			return false;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
	return true;
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *res;

	down_read(&env->bpf_progs.lock);
	res = __perf_env__find_btf(env, btf_id);
	up_read(&env->bpf_progs.lock);
	return res;
}

struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		zfree(&node->info_linear);
		bpf_metadata_free(node->metadata);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

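/* Release everything owned by @env; the struct perf_env itself is not freed. */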
void perf_env__exit(struct perf_env *env)
{
	int i, j;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	for (i = 0; i < env->nr_cpu_pmu_caps; i++)
		zfree(&env->cpu_pmu_caps[i]);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
			zfree(&env->pmu_caps[i].caps[j]);
		zfree(&env->pmu_caps[i].caps);
		zfree(&env->pmu_caps[i].pmu_name);
	}
	zfree(&env->pmu_caps);
}

void perf_env__init(struct perf_env *env)
{
	memset(env, 0, sizeof(*env));
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}

static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

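/* Fill env->cpu[] with the core, die and socket id of each CPU. */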
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int idx, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (idx = 0; idx < nr_cpus; ++idx) {
		struct perf_cpu cpu = { .cpu = idx };
		int core_id = cpu__get_core_id(cpu);
		int socket_id = cpu__get_socket_id(cpu);
		int die_id = cpu__get_die_id(cpu);

		env->cpu[idx].core_id = core_id >= 0 ? core_id : -1;
		env->cpu[idx].socket_id = socket_id >= 0 ? socket_id : -1;
		env->cpu[idx].die_id = die_id >= 0 ? die_id : -1;
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

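/*
 * env->pmu_mappings stores one "type:name" record per PMU, each record
 * terminated by a NUL character, so the buffer can be walked with
 * strlen() + 1 steps.
 */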
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmus__scan(pmu)))
		pmu_num++;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmus__scan(pmu))) {
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	struct perf_cpu cpu = {-1};
	int err = get_cpuid(cpuid, sizeof(cpuid), cpu);

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

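/*
 * Build one "name=value" string per capability of @pmu and pick out the
 * branch related capabilities into the dedicated output parameters.
 */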
static int __perf_env__read_core_pmu_caps(const struct perf_pmu *pmu,
					  int *nr_caps, char ***caps,
					  unsigned int *max_branches,
					  unsigned int *br_cntr_nr,
					  unsigned int *br_cntr_width)
{
	struct perf_pmu_caps *pcaps = NULL;
	char *ptr, **tmp;
	int ret = 0;

	*nr_caps = 0;
	*caps = NULL;

	if (!pmu->nr_caps)
		return 0;

	*caps = calloc(pmu->nr_caps, sizeof(char *));
	if (!*caps)
		return -ENOMEM;

	tmp = *caps;
	list_for_each_entry(pcaps, &pmu->caps, list) {
		if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
			ret = -ENOMEM;
			goto error;
		}

		*tmp++ = ptr;

		if (!strcmp(pcaps->name, "branches"))
			*max_branches = atoi(pcaps->value);
		else if (!strcmp(pcaps->name, "branch_counter_nr"))
			*br_cntr_nr = atoi(pcaps->value);
		else if (!strcmp(pcaps->name, "branch_counter_width"))
			*br_cntr_width = atoi(pcaps->value);
	}
	*nr_caps = pmu->nr_caps;
	return 0;
error:
	while (tmp-- != *caps)
		zfree(tmp);
	zfree(caps);
	*nr_caps = 0;
	return ret;
}

int perf_env__read_core_pmu_caps(struct perf_env *env)
{
	struct pmu_caps *pmu_caps;
	struct perf_pmu *pmu = NULL;
	int nr_pmu, i = 0, j;
	int ret;

	nr_pmu = perf_pmus__num_core_pmus();

	if (!nr_pmu)
		return -ENODEV;

	if (nr_pmu == 1) {
		pmu = perf_pmus__find_core_pmu();
		if (!pmu)
			return -ENODEV;
		ret = perf_pmu__caps_parse(pmu);
		if (ret < 0)
			return ret;
		return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
						      &env->cpu_pmu_caps,
						      &env->max_branches,
						      &env->br_cntr_nr,
						      &env->br_cntr_width);
	}

	pmu_caps = calloc(nr_pmu, sizeof(*pmu_caps));
	if (!pmu_caps)
		return -ENOMEM;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (perf_pmu__caps_parse(pmu) <= 0)
			continue;
		ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
						     &pmu_caps[i].caps,
						     &pmu_caps[i].max_branches,
						     &pmu_caps[i].br_cntr_nr,
						     &pmu_caps[i].br_cntr_width);
		if (ret)
			goto error;

		pmu_caps[i].pmu_name = strdup(pmu->name);
		if (!pmu_caps[i].pmu_name) {
			ret = -ENOMEM;
			goto error;
		}
		i++;
	}

	env->nr_pmus_with_caps = nr_pmu;
	env->pmu_caps = pmu_caps;

	return 0;
error:
	for (i = 0; i < nr_pmu; i++) {
		for (j = 0; j < pmu_caps[i].nr_caps; j++)
			zfree(&pmu_caps[i].caps[j]);
		zfree(&pmu_caps[i].caps);
		zfree(&pmu_caps[i].pmu_name);
	}
	zfree(&pmu_caps);
	return ret;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";
	if (!strncmp(arch, "loongarch", 9))
		return "loongarch";

	return arch;
}

const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

#if defined(HAVE_LIBTRACEEVENT)
#include "trace/beauty/arch_errno_names.c"
#endif

const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused)
{
#if defined(HAVE_LIBTRACEEVENT)
	if (env->arch_strerrno == NULL)
		env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env));

	return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function";
#else
	return "!HAVE_LIBTRACEEVENT";
#endif
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}

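/*
 * Map @cpu to its NUMA node. The cpu -> node lookup table is built lazily
 * on first use from env->numa_nodes; CPUs not listed in any node map to -1.
 */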
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, (int)perf_cpu_map__max(nn->map).cpu);
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			struct perf_cpu tmp;
			int j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(tmp, j, nn->map)
				env->numa_map[tmp.cpu] = i;
		}
	}

	return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}

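/* Walk the NUL separated "type:name" records in env->pmu_mappings looking for @pmu_name. */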
bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name)
{
	char *pmu_mapping = env->pmu_mappings, *colon;

	for (int i = 0; i < env->nr_pmu_mappings; ++i) {
		if (strtoul(pmu_mapping, &colon, 0) == ULONG_MAX || *colon != ':')
			goto out_error;

		pmu_mapping = colon + 1;
		if (strcmp(pmu_mapping, pmu_name) == 0)
			return true;

		pmu_mapping += strlen(pmu_mapping) + 1;
	}
out_error:
	return false;
}

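/*
 * Look up capability @cap of @pmu_name. Capabilities are stored as
 * "cap=value" strings; on a match a pointer to the value part is returned,
 * otherwise NULL.
 */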
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap)
{
	char *cap_eq;
	int cap_size;
	char **ptr;
	int i, j;

	if (!pmu_name || !cap)
		return NULL;

	cap_size = strlen(cap);
	cap_eq = zalloc(cap_size + 2);
	if (!cap_eq)
		return NULL;

	memcpy(cap_eq, cap, cap_size);
	cap_eq[cap_size] = '=';

	if (!strcmp(pmu_name, "cpu")) {
		for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
			if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &env->cpu_pmu_caps[i][cap_size + 1];
			}
		}
		goto out;
	}

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
			continue;

		ptr = env->pmu_caps[i].caps;

		for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
			if (!strncmp(ptr[j], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &ptr[j][cap_size + 1];
			}
		}
	}

out:
	free(cap_eq);
	return NULL;
}

void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width)
{
	if (nr) {
		*nr = env->cpu_pmu_caps ? env->br_cntr_nr :
					  env->pmu_caps->br_cntr_nr;
	}

	if (width) {
		*width = env->cpu_pmu_caps ? env->br_cntr_width :
					     env->pmu_caps->br_cntr_width;
	}
}

bool perf_env__is_x86_amd_cpu(struct perf_env *env)
{
	static int is_amd; /* 0: Uninitialized, 1: Yes, -1: No */

	if (is_amd == 0)
		is_amd = env->cpuid && strstarts(env->cpuid, "AuthenticAMD") ? 1 : -1;

	return is_amd >= 1 ? true : false;
}

bool x86__is_amd_cpu(void)
{
	struct perf_env env = { .total_mem = 0, };
	bool is_amd;

	perf_env__cpuid(&env);
	is_amd = perf_env__is_x86_amd_cpu(&env);
	perf_env__exit(&env);

	return is_amd;
}