Lines Matching refs:env (tools/perf/util/env.c)
4 #include "env.h"
27 bool perf_env__insert_bpf_prog_info(struct perf_env *env,
32 down_write(&env->bpf_progs.lock);
33 ret = __perf_env__insert_bpf_prog_info(env, info_node);
34 up_write(&env->bpf_progs.lock);
39 bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
46 p = &env->bpf_progs.infos.rb_node;
62 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
63 env->bpf_progs.infos_cnt++;
67 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
73 down_read(&env->bpf_progs.lock);
74 n = env->bpf_progs.infos.rb_node;
88 up_read(&env->bpf_progs.lock);
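The matches above (insert wrapper, __insert, find) suggest an rb-tree keyed by BPF program id, guarded by the reader/writer lock in env->bpf_progs. A minimal sketch of that insert/lookup pattern using the tools/lib rbtree helpers; prog_node and its id field are simplified stand-ins, not the real bpf_prog_info_node layout:

#include <linux/rbtree.h>   /* rb_link_node, rb_insert_color, rb_entry */
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the node type kept in env->bpf_progs.infos. */
struct prog_node {
	struct rb_node rb_node;
	uint32_t id;                    /* search key */
};

/* Insert keyed by id; returns false on a duplicate, mirroring the bool return above. */
static bool prog_tree__insert(struct rb_root *root, struct prog_node *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct prog_node *node = rb_entry(*p, struct prog_node, rb_node);

		parent = *p;
		if (new->id < node->id)
			p = &(*p)->rb_left;
		else if (new->id > node->id)
			p = &(*p)->rb_right;
		else
			return false;   /* duplicate id, nothing inserted */
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return true;
}

/* Lookup by id; the down_read()/up_read() pairs above indicate the caller
 * (or a locking wrapper) holds the lock around this walk. */
static struct prog_node *prog_tree__find(struct rb_root *root, uint32_t id)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct prog_node *node = rb_entry(n, struct prog_node, rb_node);

		if (id < node->id)
			n = n->rb_left;
		else if (id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}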
92 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
96 down_write(&env->bpf_progs.lock);
97 ret = __perf_env__insert_btf(env, btf_node);
98 up_write(&env->bpf_progs.lock);
102 bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
109 p = &env->bpf_progs.btfs.rb_node;
125 rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
126 env->bpf_progs.btfs_cnt++;
130 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
134 down_read(&env->bpf_progs.lock);
135 res = __perf_env__find_btf(env, btf_id);
136 up_read(&env->bpf_progs.lock);
140 struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
145 n = env->bpf_progs.btfs.rb_node;
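perf_env__insert_btf()/perf_env__find_btf() repeat the same convention as the prog-info helpers: a double-underscore variant that expects env->bpf_progs.lock to be held, plus a public wrapper that takes the lock itself. A hedged sketch of that wrapper split, using a plain pthread rwlock in place of perf's rwsem wrapper; btf_cache and its fields are invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/* Invented stand-in; perf keeps the tree, count and lock inside env->bpf_progs. */
struct btf_cache {
	pthread_rwlock_t lock;          /* initialize with pthread_rwlock_init() */
	/* struct rb_root btfs; unsigned int btfs_cnt; ... */
};

/* Lock-free worker: caller must already hold the write lock. */
static bool __btf_cache__insert(struct btf_cache *cache, uint32_t btf_id)
{
	/* rb-tree insert as in the prog_node sketch; false would mean duplicate. */
	(void)cache;
	(void)btf_id;
	return true;
}

/* Public entry point: take the write lock, delegate, drop the lock. */
static bool btf_cache__insert(struct btf_cache *cache, uint32_t btf_id)
{
	bool ret;

	pthread_rwlock_wrlock(&cache->lock);
	ret = __btf_cache__insert(cache, btf_id);
	pthread_rwlock_unlock(&cache->lock);
	return ret;
}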
160 static void perf_env__purge_bpf(struct perf_env *env)
165 down_write(&env->bpf_progs.lock);
167 root = &env->bpf_progs.infos;
180 env->bpf_progs.infos_cnt = 0;
182 root = &env->bpf_progs.btfs;
194 env->bpf_progs.btfs_cnt = 0;
196 up_write(&env->bpf_progs.lock);
199 static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
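perf_env__purge_bpf() empties both trees under the write lock before the rest of perf_env__exit() runs; the second signature at line 199 looks like the empty stub compiled when BPF support is off. A rough sketch of the erase-and-free walk, reusing the prog_node type from the first sketch and the stock rbtree iterators:

#include <stdlib.h>

static void prog_tree__purge(struct rb_root *root)
{
	struct rb_node *next = rb_first(root);

	while (next) {
		struct prog_node *node = rb_entry(next, struct prog_node, rb_node);

		next = rb_next(&node->rb_node);   /* grab the successor before erasing */
		rb_erase(&node->rb_node, root);
		free(node);
	}
	*root = RB_ROOT;                          /* leave an empty, reusable root */
}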
204 void perf_env__exit(struct perf_env *env)
208 perf_env__purge_bpf(env);
209 perf_env__purge_cgroups(env);
210 zfree(&env->hostname);
211 zfree(&env->os_release);
212 zfree(&env->version);
213 zfree(&env->arch);
214 zfree(&env->cpu_desc);
215 zfree(&env->cpuid);
216 zfree(&env->cmdline);
217 zfree(&env->cmdline_argv);
218 zfree(&env->sibling_dies);
219 zfree(&env->sibling_cores);
220 zfree(&env->sibling_threads);
221 zfree(&env->pmu_mappings);
222 zfree(&env->cpu);
223 for (i = 0; i < env->nr_cpu_pmu_caps; i++)
224 zfree(&env->cpu_pmu_caps[i]);
225 zfree(&env->cpu_pmu_caps);
226 zfree(&env->numa_map);
228 for (i = 0; i < env->nr_numa_nodes; i++)
229 perf_cpu_map__put(env->numa_nodes[i].map);
230 zfree(&env->numa_nodes);
232 for (i = 0; i < env->caches_cnt; i++)
233 cpu_cache_level__free(&env->caches[i]);
234 zfree(&env->caches);
236 for (i = 0; i < env->nr_memory_nodes; i++)
237 zfree(&env->memory_nodes[i].set);
238 zfree(&env->memory_nodes);
240 for (i = 0; i < env->nr_hybrid_nodes; i++) {
241 zfree(&env->hybrid_nodes[i].pmu_name);
242 zfree(&env->hybrid_nodes[i].cpus);
244 zfree(&env->hybrid_nodes);
246 for (i = 0; i < env->nr_pmus_with_caps; i++) {
247 for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
248 zfree(&env->pmu_caps[i].caps[j]);
249 zfree(&env->pmu_caps[i].caps);
250 zfree(&env->pmu_caps[i].pmu_name);
252 zfree(&env->pmu_caps);
255 void perf_env__init(struct perf_env *env)
258 env->bpf_progs.infos = RB_ROOT;
259 env->bpf_progs.btfs = RB_ROOT;
260 init_rwsem(&env->bpf_progs.lock);
262 env->kernel_is_64_bit = -1;
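perf_env__exit() releases everything the env owns (strings, sibling lists, caches, NUMA and memory nodes, per-PMU caps), and perf_env__init() resets the BPF trees, their lock and the kernel_is_64_bit marker. A short usage sketch of that lifecycle, assuming it is compiled inside tools/perf where "env.h" resolves; note the zero-initialization of the stack struct, as in the x86__is_amd_cpu match near the end of this listing:

#include "env.h"                          /* struct perf_env, perf_env__init/exit */

static void probe_and_discard(void)
{
	struct perf_env env = { .total_mem = 0, };   /* zero-init before use */

	perf_env__init(&env);             /* empty rb-roots, lock ready, kernel_is_64_bit = -1 */

	/* Accessors fill fields on demand, e.g. the cpuid string: */
	(void)perf_env__cpuid(&env);

	perf_env__exit(&env);             /* zfree()s every owned allocation */
}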
265 static void perf_env__init_kernel_mode(struct perf_env *env)
267 const char *arch = perf_env__raw_arch(env);
273 env->kernel_is_64_bit = 1;
275 env->kernel_is_64_bit = 0;
278 int perf_env__kernel_is_64_bit(struct perf_env *env)
280 if (env->kernel_is_64_bit == -1)
281 perf_env__init_kernel_mode(env);
283 return env->kernel_is_64_bit;
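perf_env__kernel_is_64_bit() shows the lazy-evaluation idiom used throughout this file: the field starts at -1 (set in perf_env__init), and the first caller triggers perf_env__init_kernel_mode(), which derives the width from the raw architecture string. A generic sketch of that idiom; the arch list here is an illustrative subset, not the full table env.c checks:

#include <string.h>

/* Cached tri-state: -1 = unknown, 0 = 32-bit, 1 = 64-bit. */
struct kernel_mode {
	int is_64_bit;
};

/* Illustrative subset of 64-bit machine strings. */
static int arch_is_64_bit(const char *arch)
{
	return !strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	       !strncmp(arch, "riscv64", 7) || !strncmp(arch, "s390x", 5);
}

/* First call computes and caches; later calls return the cached value. */
static int kernel_mode__is_64_bit(struct kernel_mode *km, const char *raw_arch)
{
	if (km->is_64_bit == -1)
		km->is_64_bit = arch_is_64_bit(raw_arch) ? 1 : 0;
	return km->is_64_bit;
}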
286 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
291 env->cmdline_argv = calloc(argc, sizeof(char *));
292 if (env->cmdline_argv == NULL)
300 env->cmdline_argv[i] = argv[i];
301 if (env->cmdline_argv[i] == NULL)
305 env->nr_cmdline = argc;
309 zfree(&env->cmdline_argv);
314 int perf_env__read_cpu_topology_map(struct perf_env *env)
318 if (env->cpu != NULL)
321 if (env->nr_cpus_avail == 0)
322 env->nr_cpus_avail = cpu__max_present_cpu().cpu;
324 nr_cpus = env->nr_cpus_avail;
328 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
329 if (env->cpu == NULL)
338 env->cpu[idx].core_id = core_id >= 0 ? core_id : -1;
339 env->cpu[idx].socket_id = socket_id >= 0 ? socket_id : -1;
340 env->cpu[idx].die_id = die_id >= 0 ? die_id : -1;
343 env->nr_cpus_avail = nr_cpus;
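perf_env__read_cpu_topology_map() allocates env->cpu on first use (sized from the present-CPU count) and records core/socket/die ids per CPU, storing -1 where a value cannot be read. A self-contained sketch of pulling one of those ids straight from sysfs, which is roughly where such values come from; the helper name is invented:

#include <stdio.h>

/* Read e.g. /sys/devices/system/cpu/cpu3/topology/core_id; return -1 if unavailable. */
static int read_topology_id(int cpu, const char *name)
{
	char path[128];
	FILE *f;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}

/* Usage: core = read_topology_id(cpu, "core_id");
 *        sock = read_topology_id(cpu, "physical_package_id");
 *        die  = read_topology_id(cpu, "die_id");   (absent on older kernels) */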
347 int perf_env__read_pmu_mappings(struct perf_env *env)
360 env->nr_pmu_mappings = pmu_num;
373 env->pmu_mappings = strbuf_detach(&sb, NULL);
382 int perf_env__read_cpuid(struct perf_env *env)
391 free(env->cpuid);
392 env->cpuid = strdup(cpuid);
393 if (env->cpuid == NULL)
398 static int perf_env__read_arch(struct perf_env *env)
402 if (env->arch)
406 env->arch = strdup(uts.machine);
408 return env->arch ? 0 : -ENOMEM;
411 static int perf_env__read_nr_cpus_avail(struct perf_env *env)
413 if (env->nr_cpus_avail == 0)
414 env->nr_cpus_avail = cpu__max_present_cpu().cpu;
416 return env->nr_cpus_avail ? 0 : -ENOENT;
419 const char *perf_env__raw_arch(struct perf_env *env)
421 return env && !perf_env__read_arch(env) ? env->arch : "unknown";
424 int perf_env__nr_cpus_avail(struct perf_env *env)
426 return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
468 const char *perf_env__arch(struct perf_env *env)
472 if (!env || !env->arch) { /* Assume local operation */
478 arch_name = env->arch;
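perf_env__arch() falls back to the local machine when no recorded arch is available ("Assume local operation"), then normalizes the raw name. A small sketch of that fallback using uname(2); normalize_arch() here is a reduced stand-in for perf's real normalization table:

#include <sys/utsname.h>
#include <stddef.h>
#include <string.h>

/* Stand-in: perf maps raw names (x86_64, i686, aarch64, ...) to canonical ones. */
static const char *normalize_arch(const char *raw)
{
	if (!strcmp(raw, "x86_64") || !strcmp(raw, "i686") || !strcmp(raw, "i386"))
		return "x86";
	if (!strcmp(raw, "aarch64"))
		return "arm64";
	return raw;
}

/* If no arch was recorded, assume we are running on the profiled host. */
static const char *arch_or_local(const char *recorded)
{
	static struct utsname uts;

	if (recorded)
		return normalize_arch(recorded);
	if (uname(&uts) < 0)
		return NULL;            /* could not identify even the local machine */
	return normalize_arch(uts.machine);
}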
487 const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused)
490 if (env->arch_strerrno == NULL)
491 env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env));
493 return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function";
499 const char *perf_env__cpuid(struct perf_env *env)
503 if (!env->cpuid) { /* Assume local operation */
504 status = perf_env__read_cpuid(env);
509 return env->cpuid;
512 int perf_env__nr_pmu_mappings(struct perf_env *env)
516 if (!env->nr_pmu_mappings) { /* Assume local operation */
517 status = perf_env__read_pmu_mappings(env);
522 return env->nr_pmu_mappings;
525 const char *perf_env__pmu_mappings(struct perf_env *env)
529 if (!env->pmu_mappings) { /* Assume local operation */
530 status = perf_env__read_pmu_mappings(env);
535 return env->pmu_mappings;
538 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
540 if (!env->nr_numa_map) {
544 for (i = 0; i < env->nr_numa_nodes; i++) {
545 nn = &env->numa_nodes[i];
555 env->numa_map = malloc(nr * sizeof(int));
556 if (!env->numa_map)
560 env->numa_map[i] = -1;
562 env->nr_numa_map = nr;
564 for (i = 0; i < env->nr_numa_nodes; i++) {
568 nn = &env->numa_nodes[i];
570 env->numa_map[tmp.cpu] = i;
574 return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
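perf_env__numa_node() lazily builds a reverse map from CPU number to NUMA node: it sizes the array from the highest CPU seen in any node's cpu map, fills it with -1, then stamps each node's CPUs with the node index, so the final lookup is a bounds-checked array access. A simplified sketch of that construction with plain arrays instead of perf_cpu_map; node_cpus/nr_node_cpus are invented inputs:

#include <stdlib.h>

/*
 * Build the cpu -> node map. node_cpus[n] lists the CPUs of node n,
 * nr_node_cpus[n] their count. Returns NULL on allocation failure.
 */
static int *build_numa_map(int nr_nodes, int **node_cpus,
			   const int *nr_node_cpus, int *nr_map)
{
	int *map, max_cpu = -1, n, i;

	/* Size the map from the highest CPU number that appears in any node. */
	for (n = 0; n < nr_nodes; n++)
		for (i = 0; i < nr_node_cpus[n]; i++)
			if (node_cpus[n][i] > max_cpu)
				max_cpu = node_cpus[n][i];

	*nr_map = max_cpu + 1;
	map = malloc(*nr_map * sizeof(int));
	if (!map)
		return NULL;

	for (i = 0; i < *nr_map; i++)
		map[i] = -1;                    /* CPUs not owned by any node */

	for (n = 0; n < nr_nodes; n++)
		for (i = 0; i < nr_node_cpus[n]; i++)
			map[node_cpus[n][i]] = n;

	return map;
}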
577 bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name)
579 char *pmu_mapping = env->pmu_mappings, *colon;
581 for (int i = 0; i < env->nr_pmu_mappings; ++i) {
595 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
615 for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
616 if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
618 return &env->cpu_pmu_caps[i][cap_size + 1];
624 for (i = 0; i < env->nr_pmus_with_caps; i++) {
625 if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
628 ptr = env->pmu_caps[i].caps;
630 for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
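perf_env__find_pmu_cap() stores capabilities as "name=value" strings: it builds "name=" from the requested cap, compares with strncmp over cap_size + 1 bytes so the '=' must match too, and returns a pointer just past the '='. A small sketch of that key=value scan over an array of cap strings; caps/nr_caps are illustrative parameters:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the value part of "name=value", or NULL if the cap is not present. */
static char *find_cap_value(char * const *caps, int nr_caps, const char *name)
{
	size_t key_len = strlen(name);
	char *key;
	int i;

	/* Compare against "name=" so "branches" cannot match "branches_wide". */
	key = malloc(key_len + 2);
	if (!key)
		return NULL;
	snprintf(key, key_len + 2, "%s=", name);

	for (i = 0; i < nr_caps; i++) {
		if (!strncmp(caps[i], key, key_len + 1)) {
			free(key);
			return caps[i] + key_len + 1;   /* points at the value */
		}
	}
	free(key);
	return NULL;
}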
643 void perf_env__find_br_cntr_info(struct perf_env *env,
648 *nr = env->cpu_pmu_caps ? env->br_cntr_nr :
649 env->pmu_caps->br_cntr_nr;
653 *width = env->cpu_pmu_caps ? env->br_cntr_width :
654 env->pmu_caps->br_cntr_width;
658 bool perf_env__is_x86_amd_cpu(struct perf_env *env)
663 is_amd = env->cpuid && strstarts(env->cpuid, "AuthenticAMD") ? 1 : -1;
670 struct perf_env env = { .total_mem = 0, };
673 perf_env__cpuid(&env);
674 is_amd = perf_env__is_x86_amd_cpu(&env);
675 perf_env__exit(&env);
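The final matches show the consumer side: perf_env__is_x86_amd_cpu() keys off a cpuid string beginning with "AuthenticAMD", and x86__is_amd_cpu() builds a throwaway, zero-initialized perf_env, reads the local cpuid into it, asks the question, and tears the env down again. A hedged sketch of the vendor check itself, with a plain prefix compare standing in for perf's strstarts():

#include <stdbool.h>
#include <string.h>

/* strstarts() equivalent: does str begin with prefix? */
static bool starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

/* cpuid is the recorded string; on x86 it begins with the CPU vendor id. */
static bool cpuid_is_amd(const char *cpuid)
{
	return cpuid && starts_with(cpuid, "AuthenticAMD");
}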