/* SPDX-License-Identifier: GPL-2.0 */
/* xref: /linux/tools/perf/util/env.h (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43) */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

/*
 * Topology identifiers for one CPU (perf_env::cpu holds one entry per CPU).
 * NOTE(review): a value of -1 presumably means "unknown" -- confirm against
 * the code that populates these entries.
 */
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	cluster_id;
	int	core_id;
};

/*
 * Description of a single CPU cache level.  The string members are
 * heap-allocated and released by cpu_cache_level__free() below.
 */
struct cpu_cache_level {
	u32	level;		/* cache level, e.g. 1, 2, 3 */
	u32	line_size;	/* line size -- presumably bytes; TODO confirm */
	u32	sets;
	u32	ways;
	char	*type;		/* cache type string, e.g. data vs. instruction -- TODO confirm */
	char	*size;		/* human-readable total size string */
	char	*map;		/* string describing which CPUs share this cache -- TODO confirm */
};

/*
 * Memory and CPU information for one NUMA node
 * (perf_env::numa_nodes, counted by perf_env::nr_numa_nodes).
 */
struct numa_node {
	u32		 node;		/* node id */
	u64		 mem_total;	/* total memory -- units set by the writer; TODO confirm */
	u64		 mem_free;	/* free memory, same units as mem_total */
	struct perf_cpu_map	*map;	/* CPUs belonging to this node */
};

/*
 * One memory node (perf_env::memory_nodes, counted by
 * perf_env::nr_memory_nodes; block size in perf_env::memory_bsize).
 */
struct memory_node {
	u64		 node;	/* node id */
	u64		 size;	/* presumably the bitmap size -- TODO confirm against writer */
	unsigned long	*set;	/* looks like a bitmap of present memory blocks -- confirm */
};

/*
 * One hybrid PMU (perf_env::hybrid_nodes, counted by
 * perf_env::nr_hybrid_nodes): PMU name plus its CPU list as a string.
 */
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

/*
 * Capability strings for one PMU (perf_env::pmu_caps, counted by
 * perf_env::nr_pmus_with_caps).
 */
struct pmu_caps {
	int		nr_caps;	/* number of entries in caps */
	unsigned int    max_branches;
	unsigned int	br_cntr_nr;	/* branch counters -- see perf_env__find_br_cntr_info() */
	unsigned int	br_cntr_width;

	char            **caps;		/* capability strings; see perf_env__find_pmu_cap() */
	char            *pmu_name;
};

/*
 * One scheduler domain of a CPU.  Tied to schedstat data: the matching
 * free routine free_cpu_domain_info() takes a schedstat_version, and
 * perf_env carries schedstat_version/max_sched_domains.
 */
struct domain_info {
	u32	domain;		/* domain index */
	char	*dname;		/* domain name */
	char	*cpumask;	/* CPUs in this domain, mask form */
	char	*cpulist;	/* CPUs in this domain, list form */
};

/*
 * Scheduler-domain hierarchy for one CPU (perf_env::cpu_domain).
 * Freed with free_cpu_domain_info().
 */
struct cpu_domain_map {
	u32			cpu;		/* CPU number */
	u32			nr_domains;	/* entries in domains[] */
	struct domain_info	**domains;
};

/*
 * Function type for an arch-specific errno-to-name translator
 * (stored in perf_env::arch_strerrno, used by perf_env__arch_strerrno()).
 */
typedef const char *(arch_syscalls__strerrno_t)(int err);

/*
 * Aggregated description of the environment a perf session ran in: host
 * identity, CPU/NUMA/cache topology, PMU capabilities, the perf command
 * line, compression settings, etc.  NOTE(review): populated either from
 * the running system or from a recorded session's header -- the writers
 * live outside this header; confirm in env.c / header.c.
 */
struct perf_env {
	char			*hostname;
	char			*os_release;
	char			*version;
	char			*arch;
	/* e_machine expanded from 16 to 32-bits for alignment. */
	u32			e_machine;
	u32			e_flags;
	int			nr_cpus_online;
	int			nr_cpus_avail;
	char			*cpu_desc;
	char			*cpuid;
	unsigned long long	total_mem;
	unsigned int		msr_pmu_type;
	unsigned int		max_branches;
	unsigned int		br_cntr_nr;
	unsigned int		br_cntr_width;
	unsigned int		schedstat_version;
	unsigned int		max_sched_domains;
	int			kernel_is_64_bit;

	/* Element counts for the arrays / delimited strings below. */
	int			nr_cmdline;
	int			nr_sibling_cores;
	int			nr_sibling_dies;
	int			nr_sibling_threads;
	int			nr_numa_nodes;
	int			nr_memory_nodes;
	int			nr_pmu_mappings;
	int			nr_groups;
	int			nr_cpu_pmu_caps;
	int			nr_hybrid_nodes;
	int			nr_pmus_with_caps;
	char			*cmdline;
	const char		**cmdline_argv;
	char			*sibling_cores;
	char			*sibling_dies;
	char			*sibling_threads;
	char			*pmu_mappings;
	char			**cpu_pmu_caps;
	struct cpu_topology_map	*cpu;		/* per-CPU topology, nr_cpus_avail entries -- TODO confirm count */
	struct cpu_cache_level	*caches;	/* caches_cnt entries */
	struct cpu_domain_map	**cpu_domain;	/* per-CPU sched-domain info */
	int			 caches_cnt;
	/* Compression settings; comp_type appears to take enum perf_compress_type values. */
	u32			comp_ratio;
	u32			comp_ver;
	u32			comp_type;
	u32			comp_level;
	u32			comp_mmap_len;
	struct numa_node	*numa_nodes;	/* nr_numa_nodes entries */
	struct memory_node	*memory_nodes;	/* nr_memory_nodes entries */
	unsigned long long	 memory_bsize;
	struct hybrid_node	*hybrid_nodes;	/* nr_hybrid_nodes entries */
	struct pmu_caps		*pmu_caps;	/* nr_pmus_with_caps entries */
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_info_lock protects bpf rbtrees. This is needed because the
	 * trees are accessed by different threads in perf-top
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;
		u32			infos_cnt;
		struct rb_root		btfs;
		u32			btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int			*numa_map;
	int			 nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64	tod_ns;
		u64	clockid_ns;
		u64     clockid_res_ns;
		int	clockid;
		/*
		 * enabled is valid for report mode, and is true if above
		 * values are set, it's set in process_clock_data
		 */
		bool	enabled;
	} clock;
	/* Arch hook used by perf_env__arch_strerrno(); may be NULL -- TODO confirm. */
	arch_syscalls__strerrno_t *arch_strerrno;
};

/*
 * Compression types for recorded perf data (perf_env::comp_type).
 * PERF_COMP_MAX is a count/sentinel, not a real type.
 */
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

/* Opaque node types stored in the bpf_progs rbtrees of struct perf_env. */
struct bpf_prog_info_node;
struct btf_node;

int perf_env__read_core_pmu_caps(struct perf_env *env);
/* Frees a cpu_domain_map array of nr entries; layout depends on schedstat_version. */
void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr);
/* Releases resources owned by *env; NOTE(review): presumably does not free env itself -- confirm. */
void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

/* Records the perf command line (argc/argv) in env -- see cmdline/cmdline_argv fields. */
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

/* Readers / accessors for the cpuid and PMU-mapping fields. */
int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

/* Populates env->cpu with per-CPU topology data. */
int perf_env__read_cpu_topology_map(struct perf_env *env);

/* Frees the strings owned by one cpu_cache_level entry. */
void cpu_cache_level__free(struct cpu_cache_level *cache);

/* Accessors for arch/cpuid/CPU-count fields; NOTE(review): presumably fall
 * back to probing the running system when the field is unset -- confirm in env.c. */
const char *perf_env__arch(struct perf_env *env);
/* Translates err to a name via the env's arch_strerrno hook. */
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

/* Initializes *env to a clean state; pair with perf_env__exit(). */
void perf_env__init(struct perf_env *env);

#ifdef HAVE_LIBBPF_SUPPORT
/*
 * Insert/lookup/iterate helpers for the bpf_progs rbtrees in struct
 * perf_env.  NOTE(review): the double-underscore variants presumably
 * require the caller to hold bpf_progs.lock while the plain variants
 * take it internally -- confirm in env.c.
 */
bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
/* Invokes cb(node, data) for each bpf_prog_info_node in the tree. */
void perf_env__iterate_bpf_prog_info(struct perf_env *env,
				     void (*cb)(struct bpf_prog_info_node *node,
						void *data),
				     void *data);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif // HAVE_LIBBPF_SUPPORT

/* CPU-to-NUMA-node lookup, backed by the numa_map cache in struct perf_env. */
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
/* Looks up capability 'cap' of PMU 'pmu_name' in env->pmu_caps/cpu_pmu_caps;
 * NOTE(review): presumably returns NULL when absent -- confirm in env.c. */
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
/* Reports branch-counter number and width (cf. br_cntr_nr/br_cntr_width). */
void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width);

/* x86 vendor checks; NOTE(review): the x86__* variants appear to query the
 * running host while the perf_env__* variants inspect env -- confirm. */
bool x86__is_amd_cpu(void);
bool perf_env__is_x86_amd_cpu(struct perf_env *env);
bool x86__is_intel_cpu(void);
bool perf_env__is_x86_intel_cpu(struct perf_env *env);

#endif /* __PERF_ENV_H */