// SPDX-License-Identifier: GPL-2.0
/*
 * An empty pmu-events.c file used when there are no architecture json files in
 * arch or when the jevents.py script cannot be run.
 *
 * The test cpu/soc is provided for testing.
 */
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

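/* Events for the test SoC CPU, referenced from pmu_events_map below. */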
static const struct pmu_event pmu_events__test_soc_cpu[] = {
	{
		.name = "l3_cache_rd",
		.event = "event=0x40",
		.desc = "L3 cache access, read",
		.topic = "cache",
		.long_desc = "Attributable Level 3 cache access, read",
	},
	{
		.name = "segment_reg_loads.any",
		.event = "event=0x6,period=200000,umask=0x80",
		.desc = "Number of segment register loads",
		.topic = "other",
	},
	{
		.name = "dispatch_blocked.any",
		.event = "event=0x9,period=200000,umask=0x20",
		.desc = "Memory cluster signals to block micro-op dispatch for any reason",
		.topic = "other",
	},
	{
		.name = "eist_trans",
		.event = "event=0x3a,period=200000,umask=0x0",
		.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
		.topic = "other",
	},
	{
		.name = "uncore_hisi_ddrc.flux_wcmd",
		.event = "event=0x2",
		.desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
		.topic = "uncore",
		.long_desc = "DDRC write commands",
		.pmu = "hisi_sccl,ddrc",
	},
	{
		.name = "unc_cbo_xsnp_response.miss_eviction",
		.event = "event=0x22,umask=0x81",
		.desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
		.pmu = "uncore_cbox",
	},
	{
		.name = "event-hyphen",
		.event = "event=0xe0,umask=0x00",
		.desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "UNC_CBO_HYPHEN",
		.pmu = "uncore_cbox",
	},
	{
		.name = "event-two-hyph",
		.event = "event=0xc0,umask=0x00",
		.desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "UNC_CBO_TWO_HYPH",
		.pmu = "uncore_cbox",
	},
	{
		.name = "uncore_hisi_l3c.rd_hit_cpipe",
		.event = "event=0x7",
		.desc = "Total read hits. Unit: hisi_sccl,l3c ",
		.topic = "uncore",
		.long_desc = "Total read hits",
		.pmu = "hisi_sccl,l3c",
	},
	{
		.name = "uncore_imc_free_running.cache_miss",
		.event = "event=0x12",
		.desc = "Total cache misses. Unit: uncore_imc_free_running ",
		.topic = "uncore",
		.long_desc = "Total cache misses",
		.pmu = "uncore_imc_free_running",
	},
	{
		.name = "uncore_imc.cache_hits",
		.event = "event=0x34",
		.desc = "Total cache hits. Unit: uncore_imc ",
		.topic = "uncore",
		.long_desc = "Total cache hits",
		.pmu = "uncore_imc",
	},
	{
		.name = "bp_l1_btb_correct",
		.event = "event=0x8a",
		.desc = "L1 BTB Correction",
		.topic = "branch",
	},
	{
		.name = "bp_l2_btb_correct",
		.event = "event=0x8b",
		.desc = "L2 BTB Correction",
		.topic = "branch",
	},
	{
		.name = 0,
		.event = 0,
		.desc = 0,
	},
};

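/* Metrics for the test SoC CPU, referenced from pmu_events_map below. */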
static const struct pmu_metric pmu_metrics__test_soc_cpu[] = {
	{
		.metric_expr	= "1 / IPC",
		.metric_name	= "CPI",
	},
	{
		.metric_expr	= "inst_retired.any / cpu_clk_unhalted.thread",
		.metric_name	= "IPC",
		.metric_group	= "group1",
	},
	{
		.metric_expr	= "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
		"( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
		.metric_name	= "Frontend_Bound_SMT",
	},
	{
		.metric_expr	= "l1d\\-loads\\-misses / inst_retired.any",
		.metric_name	= "dcache_miss_cpi",
	},
	{
		.metric_expr	= "l1i\\-loads\\-misses / inst_retired.any",
		.metric_name	= "icache_miss_cycles",
	},
	{
		.metric_expr	= "(dcache_miss_cpi + icache_miss_cycles)",
		.metric_name	= "cache_miss_cycles",
		.metric_group	= "group1",
	},
	{
		.metric_expr	= "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
		.metric_name	= "DCache_L2_All_Hits",
	},
	{
		.metric_expr	= "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
		"l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
		.metric_name	= "DCache_L2_All_Miss",
	},
	{
		.metric_expr	= "DCache_L2_All_Hits + DCache_L2_All_Miss",
		.metric_name	= "DCache_L2_All",
	},
	{
		.metric_expr	= "d_ratio(DCache_L2_All_Hits, DCache_L2_All)",
		.metric_name	= "DCache_L2_Hits",
	},
	{
		.metric_expr	= "d_ratio(DCache_L2_All_Miss, DCache_L2_All)",
		.metric_name	= "DCache_L2_Misses",
	},
	{
		.metric_expr	= "ipc + M2",
		.metric_name	= "M1",
	},
	{
		.metric_expr	= "ipc + M1",
		.metric_name	= "M2",
	},
	{
		.metric_expr	= "1/M3",
		.metric_name	= "M3",
	},
	{
		.metric_expr	= "64 * l1d.replacement / 1000000000 / duration_time",
		.metric_name	= "L1D_Cache_Fill_BW",
	},
	{
		.metric_expr = 0,
		.metric_name = 0,
	},
};

/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
	const struct pmu_event *entries;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
	const struct pmu_metric *entries;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the value returned by get_cpuid_str() in
 * tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
	const char *arch;
	const char *cpuid;
	const struct pmu_events_table event_table;
	const struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
static const struct pmu_events_map pmu_events_map[] = {
	{
		.arch = "testarch",
		.cpuid = "testcpu",
		.event_table = { pmu_events__test_soc_cpu },
		.metric_table = { pmu_metrics__test_soc_cpu },
	},
	{
		.arch = 0,
		.cpuid = 0,
		.event_table = { 0 },
		.metric_table = { 0 },
	},
};

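/* Events for the test SoC's system (uncore) PMUs, provided for testing. */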
static const struct pmu_event pme_test_soc_sys[] = {
	{
		.name = "sys_ddr_pmu.write_cycles",
		.event = "event=0x2b",
		.desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
		.compat = "v8",
		.topic = "uncore",
		.pmu = "uncore_sys_ddr_pmu",
	},
	{
		.name = "sys_ccn_pmu.read_cycles",
		.event = "config=0x2c",
		.desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
		.compat = "0x01",
		.topic = "uncore",
		.pmu = "uncore_sys_ccn_pmu",
	},
	{
		.name = 0,
		.event = 0,
		.desc = 0,
	},
};

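/* Struct pairing a system PMU events table with the name used to look it up. */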
struct pmu_sys_events {
	const char *name;
	const struct pmu_events_table table;
};

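/* Global table of system PMU event tables, terminated by an entry with a NULL name. */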
static const struct pmu_sys_events pmu_sys_event_tables[] = {
	{
		.table = { pme_test_soc_sys },
		.name = "pme_test_soc_sys",
	},
	{
		.table = { 0 }
	},
};

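/* Call fn for each event in the table, stopping early if fn returns non-zero. */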
int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_event_iter_fn fn,
				    void *data)
{
	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		int ret = fn(pe, table, data);

		if (ret)
			return ret;
	}
	return 0;
}

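/* Call fn for each metric in the table, stopping early if fn returns non-zero. */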
int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
				      void *data)
{
	for (const struct pmu_metric *pm = &table->entries[0]; pm->metric_expr; pm++) {
		int ret = fn(pm, table, data);

		if (ret)
			return ret;
	}
	return 0;
}

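/* Look up the events table whose cpuid matches that of the given PMU, or NULL. */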
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
	const struct pmu_events_table *table = NULL;
	char *cpuid = perf_pmu__getcpuid(pmu);
	int i;

	/*
	 * On platforms that use a CPU map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
	if (!cpuid)
		return NULL;

	i = 0;
	for (;;) {
		const struct pmu_events_map *map = &pmu_events_map[i++];

		if (!map->cpuid)
			break;

		if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
			table = &map->event_table;
			break;
		}
	}
	free(cpuid);
	return table;
}

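/* Look up the metrics table whose cpuid matches that of the given PMU, or NULL. */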
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
	const struct pmu_metrics_table *table = NULL;
	char *cpuid = perf_pmu__getcpuid(pmu);
	int i;

	/*
	 * On platforms that use a CPU map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
	if (!cpuid)
		return NULL;

	i = 0;
	for (;;) {
		const struct pmu_events_map *map = &pmu_events_map[i++];

		if (!map->cpuid)
			break;

		if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
			table = &map->metric_table;
			break;
		}
	}
	free(cpuid);
	return table;
}

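/* Find the core PMU events table matching both arch and cpuid, or NULL. */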
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->event_table;
	}
	return NULL;
}

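/* Find the core PMU metrics table matching both arch and cpuid, or NULL. */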
const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->metric_table;
	}
	return NULL;
}

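/* Call fn for every core PMU event in every mapped table, stopping on non-zero. */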
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) {
		int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

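/* Call fn for every core PMU metric in every mapped table, stopping on non-zero. */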
int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

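/* Find the system PMU events table with the given name, or NULL. */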
const struct pmu_events_table *find_sys_events_table(const char *name)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		if (!strcmp(tables->name, name))
			return &tables->table;
	}
	return NULL;
}

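/* Call fn for every event in every system PMU events table, stopping on non-zero. */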
int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

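/* There are no system PMU metrics in this empty implementation, so nothing to iterate. */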
int pmu_for_each_sys_metric(pmu_metric_iter_fn fn __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
423