/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

/* Where one logical CPU sits in the socket/die/cluster/core hierarchy. */
struct cpu_topology_map {
	int socket_id;
	int die_id;
	int cluster_id;
	int core_id;
};

/*
 * One CPU cache level. The string members are owned by this struct and
 * are released via cpu_cache_level__free().
 */
struct cpu_cache_level {
	u32 level;
	u32 line_size;
	u32 sets;
	u32 ways;
	char *type;
	char *size;
	char *map;
};

/* Memory totals and the CPU map for one NUMA node. */
struct numa_node {
	u32 node;
	u64 mem_total;
	u64 mem_free;
	struct perf_cpu_map *map;
};

/* A memory node and the bitmap ('set') of regions it covers. */
struct memory_node {
	u64 node;
	u64 size;
	unsigned long *set;
};

/* One hybrid (heterogeneous) PMU: its name and the CPU list it spans. */
struct hybrid_node {
	char *pmu_name;
	char *cpus;
};

/* Capability strings ("name=value") advertised by one PMU. */
struct pmu_caps {
	int nr_caps;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;

	char **caps;
	char *pmu_name;
};

/* One scheduler domain: id, name, and the CPUs it contains. */
struct domain_info {
	u32 domain;
	char *dname;
	char *cpumask;
	char *cpulist;
};

/* Per-CPU array of scheduler domains; freed via free_cpu_domain_info(). */
struct cpu_domain_map {
	u32 cpu;
	u32 nr_domains;
	struct domain_info **domains;
};

/* Arch-specific errno-to-string hook; used by perf_env__arch_strerrno(). */
typedef const char *(arch_syscalls__strerrno_t)(int err);

/*
 * Description of the environment perf ran in: host/OS/CPU identification,
 * topology, NUMA and memory layout, PMU mappings and capabilities, etc.
 * NOTE(review): presumably populated from perf.data header features in
 * report mode and from sysfs at record time — confirm against env.c.
 */
struct perf_env {
	char *hostname;
	char *os_release;
	char *version;
	char *arch;
	int nr_cpus_online;
	int nr_cpus_avail;
	char *cpu_desc;
	char *cpuid;
	unsigned long long total_mem;
	unsigned int msr_pmu_type;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;
	unsigned int schedstat_version;
	unsigned int max_sched_domains;
	int kernel_is_64_bit;

	/* Counts for the variable-length arrays/strings that follow. */
	int nr_cmdline;
	int nr_sibling_cores;
	int nr_sibling_dies;
	int nr_sibling_threads;
	int nr_numa_nodes;
	int nr_memory_nodes;
	int nr_pmu_mappings;
	int nr_groups;
	int nr_cpu_pmu_caps;
	int nr_hybrid_nodes;
	int nr_pmus_with_caps;
	char *cmdline;
	const char **cmdline_argv;
	char *sibling_cores;
	char *sibling_dies;
	char *sibling_threads;
	char *pmu_mappings;
	char **cpu_pmu_caps;
	struct cpu_topology_map *cpu;
	struct cpu_cache_level *caches;
	struct cpu_domain_map **cpu_domain;
	int caches_cnt;
	u32 comp_ratio;
	u32 comp_ver;
	u32 comp_type;
	u32 comp_level;
	u32 comp_mmap_len;
	struct numa_node *numa_nodes;
	struct memory_node *memory_nodes;
	unsigned long long memory_bsize;
	struct hybrid_node *hybrid_nodes;
	struct pmu_caps *pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_info_lock protects bpf rbtrees. This is needed because the
	 * trees are accessed by different threads in perf-top
	 */
	struct {
		struct rw_semaphore lock;
		struct rb_root infos;
		u32 infos_cnt;
		struct rb_root btfs;
		u32 btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore lock;
		struct rb_root tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int *numa_map;
	int nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64 tod_ns;
		u64 clockid_ns;
		u64 clockid_res_ns;
		int clockid;
		/*
		 * enabled is valid for report mode, and is true if above
		 * values are set, it's set in process_clock_data
		 */
		bool enabled;
	} clock;
	arch_syscalls__strerrno_t *arch_strerrno;
};

enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

int perf_env__read_core_pmu_caps(struct perf_env *env);
/* Frees cd_map and all domain_info it owns; layout depends on schedstat_version. */
void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr);
/* Releases everything env owns; does not free env itself. */
void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

/* Records argc/argv into env->cmdline_argv/nr_cmdline. */
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

/*
 * Lazy readers: populate the corresponding env field on demand.
 * NOTE(review): assumed to read from the running system when the field is
 * unset — confirm against env.c.
 */
int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

/* Accessors returning (possibly lazily resolved) env fields. */
const char *perf_env__arch(struct perf_env *env);
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
#ifdef HAVE_LIBBPF_SUPPORT
/*
 * BPF prog-info/BTF rbtree accessors. Double-underscore variants are the
 * lock-free versions; the plain ones presumably take env->bpf_progs.lock —
 * NOTE(review): confirm locking contract in env.c.
 */
bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
void perf_env__iterate_bpf_prog_info(struct perf_env *env,
				     void (*cb)(struct bpf_prog_info_node *node,
						void *data),
				     void *data);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif // HAVE_LIBBPF_SUPPORT

/* cpu -> NUMA node lookup (backed by env->numa_map). */
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
/* Looks up a named capability for pmu_name in env->pmu_caps. */
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
/* Reports branch-counter number/width (env->br_cntr_nr/br_cntr_width). */
void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width);

/* x86 vendor checks: bare variants query the running CPU, perf_env__
 * variants presumably consult env->cpuid — NOTE(review): confirm. */
bool x86__is_amd_cpu(void);
bool perf_env__is_x86_amd_cpu(struct perf_env *env);
bool x86__is_intel_cpu(void);
bool perf_env__is_x86_intel_cpu(struct perf_env *env);

#endif /* __PERF_ENV_H */