/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	cluster_id;
	int	core_id;
};

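/*
 * One cache level of a CPU, as described under
 * /sys/devices/system/cpu/cpuX/cache/indexY/. The string fields are
 * heap-allocated and released with cpu_cache_level__free().
 */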
struct cpu_cache_level {
	u32	level;
	u32	line_size;
	u32	sets;
	u32	ways;
	char	*type;
	char	*size;
	char	*map;
};

struct numa_node {
	u32		 node;
	u64		 mem_total;
	u64		 mem_free;
	struct perf_cpu_map	*map;
};

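/*
 * One node of the MEM_TOPOLOGY header feature: 'set' is a bitmap of the
 * memory blocks (of perf_env::memory_bsize bytes each) that belong to the
 * node.
 */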
struct memory_node {
	u64		 node;
	u64		 size;
	unsigned long	*set;
};

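/*
 * One hybrid core PMU (e.g. cpu_core/cpu_atom) and the CPU list it covers,
 * from the HYBRID_TOPOLOGY header feature.
 */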
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

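/*
 * Capabilities of one PMU as recorded in the perf.data header; each caps[]
 * entry is a "name=value" string taken from the PMU's sysfs caps/ directory.
 */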
struct pmu_caps {
	int		nr_caps;
	unsigned int    max_branches;
	unsigned int	br_cntr_nr;
	unsigned int	br_cntr_width;

	char            **caps;
	char            *pmu_name;
};

typedef const char *(arch_syscalls__strerrno_t)(int err);

arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);
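
/*
 * Illustrative sketch (not part of the upstream declarations): the resolver
 * is looked up by architecture name and maps a raw syscall errno value to
 * its symbolic name, roughly:
 *
 *	arch_syscalls__strerrno_t *f = arch_syscalls__strerrno_function("x86_64");
 *	const char *name = f ? f(ENOENT) : "unknown";
 *
 * perf_env__arch_strerrno() below is the env-aware wrapper that caches the
 * resolver in perf_env::arch_strerrno.
 */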

/*
 * Snapshot of the environment the data was recorded in (or of the running
 * system when perf is used live), populated from the perf.data header
 * features and/or read on demand.
 */
struct perf_env {
	char			*hostname;
	char			*os_release;
	char			*version;
	char			*arch;
	int			nr_cpus_online;
	int			nr_cpus_avail;
	char			*cpu_desc;
	char			*cpuid;
	unsigned long long	total_mem;
	unsigned int		msr_pmu_type;
	unsigned int		max_branches;
	unsigned int		br_cntr_nr;
	unsigned int		br_cntr_width;
	int			kernel_is_64_bit;

	int			nr_cmdline;
	int			nr_sibling_cores;
	int			nr_sibling_dies;
	int			nr_sibling_threads;
	int			nr_numa_nodes;
	int			nr_memory_nodes;
	int			nr_pmu_mappings;
	int			nr_groups;
	int			nr_cpu_pmu_caps;
	int			nr_hybrid_nodes;
	int			nr_pmus_with_caps;
	char			*cmdline;
	const char		**cmdline_argv;
	char			*sibling_cores;
	char			*sibling_dies;
	char			*sibling_threads;
	char			*pmu_mappings;
	char			**cpu_pmu_caps;
	struct cpu_topology_map	*cpu;
	struct cpu_cache_level	*caches;
	int			 caches_cnt;
	u32			comp_ratio;
	u32			comp_ver;
	u32			comp_type;
	u32			comp_level;
	u32			comp_mmap_len;
	struct numa_node	*numa_nodes;
	struct memory_node	*memory_nodes;
	unsigned long long	 memory_bsize;
	struct hybrid_node	*hybrid_nodes;
	struct pmu_caps		*pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * The lock protects the BPF rbtrees below. This is needed because
	 * the trees are accessed from different threads in perf-top.
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;
		u32			infos_cnt;
		struct rb_root		btfs;
		u32			btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int			*numa_map;
	int			 nr_numa_map;

	/* Reference for converting perf clock timestamps to wall-clock time. */
	struct {
		u64	tod_ns;
		u64	clockid_ns;
		u64	clockid_res_ns;
		int	clockid;
		/*
		 * enabled is only meaningful in report mode; it is true when
		 * the values above have been set. It is set in
		 * process_clock_data().
		 */
		bool	enabled;
	} clock;
	arch_syscalls__strerrno_t *arch_strerrno;
};

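/*
 * Illustrative sketch (not part of the upstream declarations), assuming the
 * usual interpretation of the clock reference above: tod_ns and clockid_ns
 * capture CLOCK_REALTIME and the session clock at the same instant, so a
 * sample timestamp can be turned into wall-clock time roughly as:
 *
 *	if (env->clock.enabled)
 *		wall_ns = env->clock.tod_ns + (sample_time - env->clock.clockid_ns);
 */
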
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

const char *perf_env__arch(struct perf_env *env);
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

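/*
 * Illustrative note (not part of the upstream declarations): these accessors
 * generally fall back to reading the running system when the field has not
 * been populated from a perf.data header, e.g. perf_env__cpuid() calls
 * perf_env__read_cpuid() on demand and perf_env__arch() falls back to
 * uname(2), roughly:
 *
 *	const char *arch = perf_env__arch(env);
 *	int ncpus = perf_env__nr_cpus_avail(env);
 */
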
void perf_env__init(struct perf_env *env);
void __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
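
/*
 * Locking sketch (illustrative): the double-underscore variants expect the
 * caller to already hold perf_env::bpf_progs.lock, while the plain variants
 * take it themselves, e.g.:
 *
 *	down_read(&env->bpf_progs.lock);
 *	node = __perf_env__find_btf(env, btf_id);
 *	up_read(&env->bpf_progs.lock);
 */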

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);
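
/*
 * Illustrative sketch (not part of the upstream declarations):
 *
 *	struct perf_cpu cpu = { .cpu = 0 };
 *	int node = perf_env__numa_node(env, cpu);	NUMA node of CPU 0, or -1
 *	char *val = perf_env__find_pmu_cap(env, "cpu", "branches");
 *
 * perf_env__numa_node() builds and consults the numa_map cache in struct
 * perf_env; perf_env__find_pmu_cap() returns the value of the named
 * capability if it was recorded, or NULL.
 */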

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width);
#endif /* __PERF_ENV_H */