/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

struct cpu_topology_map {
	int socket_id;
	int die_id;
	int core_id;
};

struct cpu_cache_level {
	u32 level;
	u32 line_size;
	u32 sets;
	u32 ways;
	char *type;
	char *size;
	char *map;
};

struct numa_node {
	u32 node;
	u64 mem_total;
	u64 mem_free;
	struct perf_cpu_map *map;
};

struct memory_node {
	u64 node;
	u64 size;
	unsigned long *set;
};

struct hybrid_node {
	char *pmu_name;
	char *cpus;
};

struct pmu_caps {
	int nr_caps;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;

	char **caps;
	char *pmu_name;
};

typedef const char *(arch_syscalls__strerrno_t)(int err);

arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);

/*
 * Aggregated information about the environment a perf.data file was
 * recorded on, plus state built up at runtime (e.g. the BPF program/BTF
 * trees used by perf-top).
 */
struct perf_env {
	char *hostname;
	char *os_release;
	char *version;
	char *arch;
	int nr_cpus_online;
	int nr_cpus_avail;
	char *cpu_desc;
	char *cpuid;
	unsigned long long total_mem;
	unsigned int msr_pmu_type;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;
	int kernel_is_64_bit;

	int nr_cmdline;
	int nr_sibling_cores;
	int nr_sibling_dies;
	int nr_sibling_threads;
	int nr_numa_nodes;
	int nr_memory_nodes;
	int nr_pmu_mappings;
	int nr_groups;
	int nr_cpu_pmu_caps;
	int nr_hybrid_nodes;
	int nr_pmus_with_caps;
	char *cmdline;
	const char **cmdline_argv;
	char *sibling_cores;
	char *sibling_dies;
	char *sibling_threads;
	char *pmu_mappings;
	char **cpu_pmu_caps;
	struct cpu_topology_map *cpu;
	struct cpu_cache_level *caches;
	int caches_cnt;
	u32 comp_ratio;
	u32 comp_ver;
	u32 comp_type;
	u32 comp_level;
	u32 comp_mmap_len;
	struct numa_node *numa_nodes;
	struct memory_node *memory_nodes;
	unsigned long long memory_bsize;
	struct hybrid_node *hybrid_nodes;
	struct pmu_caps *pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_progs.lock protects the BPF rbtrees below. This is needed
	 * because the trees are accessed by different threads in perf-top.
	 */
	struct {
		struct rw_semaphore lock;
		struct rb_root infos;
		u32 infos_cnt;
		struct rb_root btfs;
		u32 btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore lock;
		struct rb_root tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int *numa_map;
	int nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64 tod_ns;
		u64 clockid_ns;
		u64 clockid_res_ns;
		int clockid;
		/*
		 * enabled is valid for report mode, and is true if the above
		 * values are set; it is set in process_clock_data.
		 */
		bool enabled;
	} clock;
	arch_syscalls__strerrno_t *arch_strerrno;
};

enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

const char *perf_env__arch(struct perf_env *env);
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
void __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							 __u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
#endif /* __PERF_ENV_H */