xref: /linux/tools/perf/pmu-events/empty-pmu-events.c (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
1 
2 /* SPDX-License-Identifier: GPL-2.0 */
3 /* THIS FILE WAS AUTOGENERATED BY jevents.py arch=none model=none ! */
4 
5 #include <pmu-events/pmu-events.h>
6 #include "util/header.h"
7 #include "util/pmu.h"
8 #include <string.h>
9 #include <stddef.h>
10 
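/*
 * Compact form of an event or metric: offset is an index into big_c_string
 * below, where the record's '\0'-separated fields begin.
 */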
11 struct compact_pmu_event {
12         int offset;
13 };
14 
15 struct pmu_table_entry {
16         const struct compact_pmu_event *entries;
17         uint32_t num_entries;
18         struct compact_pmu_event pmu_name;
19 };
20 
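/*
 * All event and metric strings are packed into this single string; the
 * offset= comments give the index of each record and match the offsets stored
 * in the compact_pmu_event tables that follow.
 */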
21 static const char *const big_c_string =
22 /* offset=0 */ "software\000"
23 /* offset=9 */ "cpu-clock\000software\000Per-CPU high-resolution timer based event\000config=0\000\00000\000\000\000\000\000"
24 /* offset=87 */ "task-clock\000software\000Per-task high-resolution timer based event\000config=1\000\00000\000\000\000\000\000"
25 /* offset=167 */ "faults\000software\000Number of page faults [This event is an alias of page-faults]\000config=2\000\00000\000\000\000\000\000"
26 /* offset=262 */ "page-faults\000software\000Number of page faults [This event is an alias of faults]\000config=2\000\00000\000\000\000\000\000"
27 /* offset=357 */ "context-switches\000software\000Number of context switches [This event is an alias of cs]\000config=3\000\00000\000\000\000\000\000"
28 /* offset=458 */ "cs\000software\000Number of context switches [This event is an alias of context-switches]\000config=3\000\00000\000\000\000\000\000"
29 /* offset=559 */ "cpu-migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of migrations]\000config=4\000\00000\000\000\000\000\000"
30 /* offset=691 */ "migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of cpu-migrations]\000config=4\000\00000\000\000\000\000\000"
31 /* offset=823 */ "minor-faults\000software\000Number of minor page faults. Minor faults don't require I/O to handle\000config=5\000\00000\000\000\000\000\000"
32 /* offset=932 */ "major-faults\000software\000Number of major page faults. Major faults require I/O to handle\000config=6\000\00000\000\000\000\000\000"
33 /* offset=1035 */ "alignment-faults\000software\000Number of kernel handled memory alignment faults\000config=7\000\00000\000\000\000\000\000"
34 /* offset=1127 */ "emulation-faults\000software\000Number of kernel handled unimplemented instruction faults handled through emulation\000config=8\000\00000\000\000\000\000\000"
35 /* offset=1254 */ "dummy\000software\000A placeholder event that doesn't count anything\000config=9\000\00000\000\000\000\000\000"
36 /* offset=1334 */ "bpf-output\000software\000An event used by BPF programs to write to the perf ring buffer\000config=0xa\000\00000\000\000\000\000\000"
37 /* offset=1436 */ "cgroup-switches\000software\000Number of context switches to a task in a different cgroup\000config=0xb\000\00000\000\000\000\000\000"
38 /* offset=1539 */ "tool\000"
39 /* offset=1544 */ "duration_time\000tool\000Wall clock interval time in nanoseconds\000config=1\000\00000\000\000\000\000\000"
40 /* offset=1620 */ "user_time\000tool\000User (non-kernel) time in nanoseconds\000config=2\000\00000\000\000\000\000\000"
41 /* offset=1690 */ "system_time\000tool\000System/kernel time in nanoseconds\000config=3\000\00000\000\000\000\000\000"
42 /* offset=1758 */ "has_pmem\000tool\0001 if persistent memory installed otherwise 0\000config=4\000\00000\000\000\000\000\000"
43 /* offset=1834 */ "num_cores\000tool\000Number of cores. A core consists of 1 or more thread, with each thread being associated with a logical Linux CPU\000config=5\000\00000\000\000\000\000\000"
44 /* offset=1979 */ "num_cpus\000tool\000Number of logical Linux CPUs. There may be multiple such CPUs on a core\000config=6\000\00000\000\000\000\000\000"
45 /* offset=2082 */ "num_cpus_online\000tool\000Number of online logical Linux CPUs. There may be multiple such CPUs on a core\000config=7\000\00000\000\000\000\000\000"
46 /* offset=2199 */ "num_dies\000tool\000Number of dies. Each die has 1 or more cores\000config=8\000\00000\000\000\000\000\000"
47 /* offset=2275 */ "num_packages\000tool\000Number of packages. Each package has 1 or more die\000config=9\000\00000\000\000\000\000\000"
48 /* offset=2361 */ "slots\000tool\000Number of functional units that in parallel can execute parts of an instruction\000config=0xa\000\00000\000\000\000\000\000"
49 /* offset=2471 */ "smt_on\000tool\0001 if simultaneous multithreading (aka hyperthreading) is enable otherwise 0\000config=0xb\000\00000\000\000\000\000\000"
50 /* offset=2578 */ "system_tsc_freq\000tool\000The amount a Time Stamp Counter (TSC) increases per second\000config=0xc\000\00000\000\000\000\000\000"
51 /* offset=2677 */ "default_core\000"
52 /* offset=2690 */ "bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000\000\000\000"
53 /* offset=2752 */ "bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000\000\000\000"
54 /* offset=2814 */ "l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000\000\000\000Attributable Level 3 cache access, read\000"
55 /* offset=2912 */ "segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000\000\000\000"
56 /* offset=3014 */ "dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000\000\000\000"
57 /* offset=3147 */ "eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000\000\000\000"
58 /* offset=3265 */ "hisi_sccl,ddrc\000"
59 /* offset=3280 */ "uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000\000\000\000\000"
60 /* offset=3350 */ "uncore_cbox\000"
61 /* offset=3362 */ "unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000\000\000\000\000"
62 /* offset=3516 */ "event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000\000\000\000\000"
63 /* offset=3570 */ "event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000\000\000\000\000"
64 /* offset=3628 */ "hisi_sccl,l3c\000"
65 /* offset=3642 */ "uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000\000\000\000\000"
66 /* offset=3710 */ "uncore_imc_free_running\000"
67 /* offset=3734 */ "uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000\000\000\000\000"
68 /* offset=3814 */ "uncore_imc\000"
69 /* offset=3825 */ "uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000\000\000\000\000"
70 /* offset=3890 */ "uncore_sys_ddr_pmu\000"
71 /* offset=3909 */ "sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000\000\000\000"
72 /* offset=3985 */ "uncore_sys_ccn_pmu\000"
73 /* offset=4004 */ "sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000\000\000\000"
74 /* offset=4081 */ "uncore_sys_cmn_pmu\000"
75 /* offset=4100 */ "sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000\000\000\000"
76 /* offset=4243 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000"
77 /* offset=4265 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000"
78 /* offset=4328 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000"
79 /* offset=4494 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
80 /* offset=4558 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
81 /* offset=4625 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000"
82 /* offset=4696 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000"
83 /* offset=4790 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000"
84 /* offset=4924 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000"
85 /* offset=4988 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000"
86 /* offset=5056 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000"
87 /* offset=5126 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\00000"
88 /* offset=5148 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\00000"
89 /* offset=5170 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\00000"
90 /* offset=5190 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000"
91 ;
92 
93 static const struct compact_pmu_event pmu_events__common_software[] = {
94 { 1035 }, /* alignment-faults\000software\000Number of kernel handled memory alignment faults\000config=7\000\00000\000\000\000\000\000 */
95 { 1334 }, /* bpf-output\000software\000An event used by BPF programs to write to the perf ring buffer\000config=0xa\000\00000\000\000\000\000\000 */
96 { 1436 }, /* cgroup-switches\000software\000Number of context switches to a task in a different cgroup\000config=0xb\000\00000\000\000\000\000\000 */
97 { 357 }, /* context-switches\000software\000Number of context switches [This event is an alias of cs]\000config=3\000\00000\000\000\000\000\000 */
98 { 9 }, /* cpu-clock\000software\000Per-CPU high-resolution timer based event\000config=0\000\00000\000\000\000\000\000 */
99 { 559 }, /* cpu-migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of migrations]\000config=4\000\00000\000\000\000\000\000 */
100 { 458 }, /* cs\000software\000Number of context switches [This event is an alias of context-switches]\000config=3\000\00000\000\000\000\000\000 */
101 { 1254 }, /* dummy\000software\000A placeholder event that doesn't count anything\000config=9\000\00000\000\000\000\000\000 */
102 { 1127 }, /* emulation-faults\000software\000Number of kernel handled unimplemented instruction faults handled through emulation\000config=8\000\00000\000\000\000\000\000 */
103 { 167 }, /* faults\000software\000Number of page faults [This event is an alias of page-faults]\000config=2\000\00000\000\000\000\000\000 */
104 { 932 }, /* major-faults\000software\000Number of major page faults. Major faults require I/O to handle\000config=6\000\00000\000\000\000\000\000 */
105 { 691 }, /* migrations\000software\000Number of times a process has migrated to a new CPU [This event is an alias of cpu-migrations]\000config=4\000\00000\000\000\000\000\000 */
106 { 823 }, /* minor-faults\000software\000Number of minor page faults. Minor faults don't require I/O to handle\000config=5\000\00000\000\000\000\000\000 */
107 { 262 }, /* page-faults\000software\000Number of page faults [This event is an alias of faults]\000config=2\000\00000\000\000\000\000\000 */
108 { 87 }, /* task-clock\000software\000Per-task high-resolution timer based event\000config=1\000\00000\000\000\000\000\000 */
109 };
110 static const struct compact_pmu_event pmu_events__common_tool[] = {
111 { 1544 }, /* duration_time\000tool\000Wall clock interval time in nanoseconds\000config=1\000\00000\000\000\000\000\000 */
112 { 1758 }, /* has_pmem\000tool\0001 if persistent memory installed otherwise 0\000config=4\000\00000\000\000\000\000\000 */
113 { 1834 }, /* num_cores\000tool\000Number of cores. A core consists of 1 or more thread, with each thread being associated with a logical Linux CPU\000config=5\000\00000\000\000\000\000\000 */
114 { 1979 }, /* num_cpus\000tool\000Number of logical Linux CPUs. There may be multiple such CPUs on a core\000config=6\000\00000\000\000\000\000\000 */
115 { 2082 }, /* num_cpus_online\000tool\000Number of online logical Linux CPUs. There may be multiple such CPUs on a core\000config=7\000\00000\000\000\000\000\000 */
116 { 2199 }, /* num_dies\000tool\000Number of dies. Each die has 1 or more cores\000config=8\000\00000\000\000\000\000\000 */
117 { 2275 }, /* num_packages\000tool\000Number of packages. Each package has 1 or more die\000config=9\000\00000\000\000\000\000\000 */
118 { 2361 }, /* slots\000tool\000Number of functional units that in parallel can execute parts of an instruction\000config=0xa\000\00000\000\000\000\000\000 */
119 { 2471 }, /* smt_on\000tool\0001 if simultaneous multithreading (aka hyperthreading) is enable otherwise 0\000config=0xb\000\00000\000\000\000\000\000 */
120 { 1690 }, /* system_time\000tool\000System/kernel time in nanoseconds\000config=3\000\00000\000\000\000\000\000 */
121 { 2578 }, /* system_tsc_freq\000tool\000The amount a Time Stamp Counter (TSC) increases per second\000config=0xc\000\00000\000\000\000\000\000 */
122 { 1620 }, /* user_time\000tool\000User (non-kernel) time in nanoseconds\000config=2\000\00000\000\000\000\000\000 */
123 
124 };
125 
126 const struct pmu_table_entry pmu_events__common[] = {
127 {
128      .entries = pmu_events__common_software,
129      .num_entries = ARRAY_SIZE(pmu_events__common_software),
130      .pmu_name = { 0 /* software\000 */ },
131 },
132 {
133      .entries = pmu_events__common_tool,
134      .num_entries = ARRAY_SIZE(pmu_events__common_tool),
135      .pmu_name = { 1539 /* tool\000 */ },
136 },
137 };
138 
139 static const struct compact_pmu_event pmu_events__test_soc_cpu_default_core[] = {
140 { 2690 }, /* bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000\000\000\000 */
141 { 2752 }, /* bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000\000\000\000 */
142 { 3014 }, /* dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000\000\000\000 */
143 { 3147 }, /* eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000\000\000\000 */
144 { 2814 }, /* l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000\000\000\000Attributable Level 3 cache access, read\000 */
145 { 2912 }, /* segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000\000\000\000 */
146 };
147 static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_ddrc[] = {
148 { 3280 }, /* uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000\000\000\000\000 */
149 };
150 static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_l3c[] = {
151 { 3642 }, /* uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000\000\000\000\000 */
152 };
153 static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_cbox[] = {
154 { 3516 }, /* event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000\000\000\000\000 */
155 { 3570 }, /* event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000\000\000\000\000 */
156 { 3362 }, /* unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000\000\000\000\000 */
157 };
158 static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc[] = {
159 { 3825 }, /* uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000\000\000\000\000 */
160 };
161 static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc_free_running[] = {
162 { 3734 }, /* uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000\000\000\000\000 */
163 
164 };
165 
166 const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
167 {
168      .entries = pmu_events__test_soc_cpu_default_core,
169      .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_default_core),
170      .pmu_name = { 2677 /* default_core\000 */ },
171 },
172 {
173      .entries = pmu_events__test_soc_cpu_hisi_sccl_ddrc,
174      .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_ddrc),
175      .pmu_name = { 3265 /* hisi_sccl,ddrc\000 */ },
176 },
177 {
178      .entries = pmu_events__test_soc_cpu_hisi_sccl_l3c,
179      .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_l3c),
180      .pmu_name = { 3628 /* hisi_sccl,l3c\000 */ },
181 },
182 {
183      .entries = pmu_events__test_soc_cpu_uncore_cbox,
184      .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_cbox),
185      .pmu_name = { 3350 /* uncore_cbox\000 */ },
186 },
187 {
188      .entries = pmu_events__test_soc_cpu_uncore_imc,
189      .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc),
190      .pmu_name = { 3814 /* uncore_imc\000 */ },
191 },
192 {
193      .entries = pmu_events__test_soc_cpu_uncore_imc_free_running,
194      .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc_free_running),
195      .pmu_name = { 3710 /* uncore_imc_free_running\000 */ },
196 },
197 };
198 
199 static const struct compact_pmu_event pmu_metrics__test_soc_cpu_default_core[] = {
200 { 4243 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000 */
201 { 4924 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000 */
202 { 4696 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000 */
203 { 4790 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000 */
204 { 4988 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
205 { 5056 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
206 { 4328 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000 */
207 { 4265 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000 */
208 { 5190 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000 */
209 { 5126 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\00000 */
210 { 5148 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\00000 */
211 { 5170 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\00000 */
212 { 4625 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000 */
213 { 4494 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
214 { 4558 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
215 
216 };
217 
218 const struct pmu_table_entry pmu_metrics__test_soc_cpu[] = {
219 {
220      .entries = pmu_metrics__test_soc_cpu_default_core,
221      .num_entries = ARRAY_SIZE(pmu_metrics__test_soc_cpu_default_core),
222      .pmu_name = { 2677 /* default_core\000 */ },
223 },
224 };
225 
226 static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ccn_pmu[] = {
227 { 4004 }, /* sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000\000\000\000 */
228 };
229 static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_cmn_pmu[] = {
230 { 4100 }, /* sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000\000\000\000 */
231 };
232 static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ddr_pmu[] = {
233 { 3909 }, /* sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000\000\000\000 */
234 
235 };
236 
237 const struct pmu_table_entry pmu_events__test_soc_sys[] = {
238 {
239      .entries = pmu_events__test_soc_sys_uncore_sys_ccn_pmu,
240      .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ccn_pmu),
241      .pmu_name = { 3985 /* uncore_sys_ccn_pmu\000 */ },
242 },
243 {
244      .entries = pmu_events__test_soc_sys_uncore_sys_cmn_pmu,
245      .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_cmn_pmu),
246      .pmu_name = { 4081 /* uncore_sys_cmn_pmu\000 */ },
247 },
248 {
249      .entries = pmu_events__test_soc_sys_uncore_sys_ddr_pmu,
250      .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ddr_pmu),
251      .pmu_name = { 3890 /* uncore_sys_ddr_pmu\000 */ },
252 },
253 };
254 
255 
256 /* Struct used to make the PMU event table implementation opaque to callers. */
257 struct pmu_events_table {
258         const struct pmu_table_entry *pmus;
259         uint32_t num_pmus;
260 };
261 
262 /* Struct used to make the PMU metric table implementation opaque to callers. */
263 struct pmu_metrics_table {
264         const struct pmu_table_entry *pmus;
265         uint32_t num_pmus;
266 };
267 
268 /*
269  * Map a CPU to its table of PMU events. The CPU is identified by the
270  * cpuid field, which is an arch-specific identifier for the CPU.
271  * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
272  * must match the value returned by get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
273  *
274  * The cpuid can contain any character other than the comma.
275  */
276 struct pmu_events_map {
277         const char *arch;
278         const char *cpuid;
279         struct pmu_events_table event_table;
280         struct pmu_metrics_table metric_table;
281 };
282 
283 /*
284  * Global table mapping each known CPU for the architecture to its
285  * table of PMU events.
286  */
287 const struct pmu_events_map pmu_events_map[] = {
288 {
289 	.arch = "common",
290 	.cpuid = "common",
291 	.event_table = {
292 		.pmus = pmu_events__common,
293 		.num_pmus = ARRAY_SIZE(pmu_events__common),
294 	},
295 	.metric_table = {},
296 },
297 {
298 	.arch = "testarch",
299 	.cpuid = "testcpu",
300 	.event_table = {
301 		.pmus = pmu_events__test_soc_cpu,
302 		.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
303 	},
304 	.metric_table = {
305 		.pmus = pmu_metrics__test_soc_cpu,
306 		.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
307 	}
308 },
309 {
310 	.arch = 0,
311 	.cpuid = 0,
312 	.event_table = { 0, 0 },
313 	.metric_table = { 0, 0 },
314 }
315 };
316 
317 struct pmu_sys_events {
318 	const char *name;
319 	struct pmu_events_table event_table;
320 	struct pmu_metrics_table metric_table;
321 };
322 
323 static const struct pmu_sys_events pmu_sys_event_tables[] = {
324 	{
325 		.event_table = {
326 			.pmus = pmu_events__test_soc_sys,
327 			.num_pmus = ARRAY_SIZE(pmu_events__test_soc_sys)
328 		},
329 		.name = "pmu_events__test_soc_sys",
330 	},
331 	{
332 		.event_table = { 0, 0 },
333 		.metric_table = { 0, 0 },
334 	},
335 };
336 
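/*
 * Decode one event record starting at 'offset' in big_c_string. Fields are
 * stored back to back, each terminated by '\0', in the order read below;
 * deprecated and perpkg are single ASCII digits. Empty string fields decode
 * to NULL pointers.
 */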
337 static void decompress_event(int offset, struct pmu_event *pe)
338 {
339 	const char *p = &big_c_string[offset];
340 
341 	pe->name = (*p == '\0' ? NULL : p);
342 	while (*p++);
343 	pe->topic = (*p == '\0' ? NULL : p);
344 	while (*p++);
345 	pe->desc = (*p == '\0' ? NULL : p);
346 	while (*p++);
347 	pe->event = (*p == '\0' ? NULL : p);
348 	while (*p++);
349 	pe->compat = (*p == '\0' ? NULL : p);
350 	while (*p++);
351 	pe->deprecated = *p - '0';
352 	p++;
353 	pe->perpkg = *p - '0';
354 	p++;
355 	pe->unit = (*p == '\0' ? NULL : p);
356 	while (*p++);
357 	pe->retirement_latency_mean = (*p == '\0' ? NULL : p);
358 	while (*p++);
359 	pe->retirement_latency_min = (*p == '\0' ? NULL : p);
360 	while (*p++);
361 	pe->retirement_latency_max = (*p == '\0' ? NULL : p);
362 	while (*p++);
363 	pe->long_desc = (*p == '\0' ? NULL : p);
364 }
365 
366 static void decompress_metric(int offset, struct pmu_metric *pm)
367 {
368 	const char *p = &big_c_string[offset];
369 
370 	pm->metric_name = (*p == '\0' ? NULL : p);
371 	while (*p++);
372 	pm->metric_group = (*p == '\0' ? NULL : p);
373 	while (*p++);
374 	pm->metric_expr = (*p == '\0' ? NULL : p);
375 	while (*p++);
376 	pm->metric_threshold = (*p == '\0' ? NULL : p);
377 	while (*p++);
378 	pm->desc = (*p == '\0' ? NULL : p);
379 	while (*p++);
380 	pm->long_desc = (*p == '\0' ? NULL : p);
381 	while (*p++);
382 	pm->unit = (*p == '\0' ? NULL : p);
383 	while (*p++);
384 	pm->compat = (*p == '\0' ? NULL : p);
385 	while (*p++);
386 	pm->metricgroup_no_group = (*p == '\0' ? NULL : p);
387 	while (*p++);
388 	pm->default_metricgroup_name = (*p == '\0' ? NULL : p);
389 	while (*p++);
390 	pm->aggr_mode = *p - '0';
391 	p++;
392 	pm->event_grouping = *p - '0';
393 }
394 
395 static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
396                                                 const struct pmu_table_entry *pmu,
397                                                 pmu_event_iter_fn fn,
398                                                 void *data)
399 {
400         int ret;
401         struct pmu_event pe = {
402                 .pmu = &big_c_string[pmu->pmu_name.offset],
403         };
404 
405         for (uint32_t i = 0; i < pmu->num_entries; i++) {
406                 decompress_event(pmu->entries[i].offset, &pe);
407                 if (!pe.name)
408                         continue;
409                 ret = fn(&pe, table, data);
410                 if (ret)
411                         return ret;
412         }
413         return 0;
414 }
415 
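/*
 * Events within a PMU table are sorted by name (NULL names first), so look up
 * 'name' with a case-insensitive binary search over the compact entries.
 */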
416 static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
417                                             const struct pmu_table_entry *pmu,
418                                             const char *name,
419                                             pmu_event_iter_fn fn,
420                                             void *data)
421 {
422         struct pmu_event pe = {
423                 .pmu = &big_c_string[pmu->pmu_name.offset],
424         };
425         int low = 0, high = pmu->num_entries - 1;
426 
427         while (low <= high) {
428                 int cmp, mid = (low + high) / 2;
429 
430                 decompress_event(pmu->entries[mid].offset, &pe);
431 
432                 if (!pe.name && !name)
433                         goto do_call;
434 
435                 if (!pe.name && name) {
436                         low = mid + 1;
437                         continue;
438                 }
439                 if (pe.name && !name) {
440                         high = mid - 1;
441                         continue;
442                 }
443 
444                 cmp = strcasecmp(pe.name, name);
445                 if (cmp < 0) {
446                         low = mid + 1;
447                         continue;
448                 }
449                 if (cmp > 0) {
450                         high = mid - 1;
451                         continue;
452                 }
453   do_call:
454                 return fn ? fn(&pe, table, data) : 0;
455         }
456         return PMU_EVENTS__NOT_FOUND;
457 }
458 
459 int pmu_events_table__for_each_event(const struct pmu_events_table *table,
460                                     struct perf_pmu *pmu,
461                                     pmu_event_iter_fn fn,
462                                     void *data)
463 {
464         for (size_t i = 0; i < table->num_pmus; i++) {
465                 const struct pmu_table_entry *table_pmu = &table->pmus[i];
466                 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
467                 int ret;
468 
469                 if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
470                         continue;
471 
472                 ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
473                 if (ret)
474                         return ret;
475         }
476         return 0;
477 }
478 
479 int pmu_events_table__find_event(const struct pmu_events_table *table,
480                                  struct perf_pmu *pmu,
481                                  const char *name,
482                                  pmu_event_iter_fn fn,
483                                  void *data)
484 {
485         for (size_t i = 0; i < table->num_pmus; i++) {
486                 const struct pmu_table_entry *table_pmu = &table->pmus[i];
487                 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
488                 int ret;
489 
490                 if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
491                         continue;
492 
493                 ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
494                 if (ret != PMU_EVENTS__NOT_FOUND)
495                         return ret;
496         }
497         return PMU_EVENTS__NOT_FOUND;
498 }
499 
500 size_t pmu_events_table__num_events(const struct pmu_events_table *table,
501                                     struct perf_pmu *pmu)
502 {
503         size_t count = 0;
504 
505         for (size_t i = 0; i < table->num_pmus; i++) {
506                 const struct pmu_table_entry *table_pmu = &table->pmus[i];
507                 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
508 
509                 if (perf_pmu__name_wildcard_match(pmu, pmu_name))
510                         count += table_pmu->num_entries;
511         }
512         return count;
513 }
514 
515 static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
516                                                 const struct pmu_table_entry *pmu,
517                                                 pmu_metric_iter_fn fn,
518                                                 void *data)
519 {
520         int ret;
521         struct pmu_metric pm = {
522                 .pmu = &big_c_string[pmu->pmu_name.offset],
523         };
524 
525         for (uint32_t i = 0; i < pmu->num_entries; i++) {
526                 decompress_metric(pmu->entries[i].offset, &pm);
527                 if (!pm.metric_expr)
528                         continue;
529                 ret = fn(&pm, table, data);
530                 if (ret)
531                         return ret;
532         }
533         return 0;
534 }
535 
536 static int pmu_metrics_table__find_metric_pmu(const struct pmu_metrics_table *table,
537                                             const struct pmu_table_entry *pmu,
538                                             const char *metric,
539                                             pmu_metric_iter_fn fn,
540                                             void *data)
541 {
542         struct pmu_metric pm = {
543                 .pmu = &big_c_string[pmu->pmu_name.offset],
544         };
545         int low = 0, high = pmu->num_entries - 1;
546 
547         while (low <= high) {
548                 int cmp, mid = (low + high) / 2;
549 
550                 decompress_metric(pmu->entries[mid].offset, &pm);
551 
552                 if (!pm.metric_name && !metric)
553                         goto do_call;
554 
555                 if (!pm.metric_name && metric) {
556                         low = mid + 1;
557                         continue;
558                 }
559                 if (pm.metric_name && !metric) {
560                         high = mid - 1;
561                         continue;
562                 }
563 
564                 cmp = strcmp(pm.metric_name, metric);
565                 if (cmp < 0) {
566                         low = mid + 1;
567                         continue;
568                 }
569                 if (cmp > 0) {
570                         high = mid - 1;
571                         continue;
572                 }
573   do_call:
574                 return fn ? fn(&pm, table, data) : 0;
575         }
576         return PMU_METRICS__NOT_FOUND;
577 }
578 
579 int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
580                                      pmu_metric_iter_fn fn,
581                                      void *data)
582 {
583         for (size_t i = 0; i < table->num_pmus; i++) {
584                 int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
585                                                                  fn, data);
586 
587                 if (ret)
588                         return ret;
589         }
590         return 0;
591 }
592 
593 int pmu_metrics_table__find_metric(const struct pmu_metrics_table *table,
594                                  struct perf_pmu *pmu,
595                                  const char *metric,
596                                  pmu_metric_iter_fn fn,
597                                  void *data)
598 {
599         for (size_t i = 0; i < table->num_pmus; i++) {
600                 const struct pmu_table_entry *table_pmu = &table->pmus[i];
601                 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
602                 int ret;
603 
604                 if (pmu && !perf_pmu__name_wildcard_match(pmu, pmu_name))
605                         continue;
606 
607                 ret = pmu_metrics_table__find_metric_pmu(table, table_pmu, metric, fn, data);
608                 if (ret != PMU_METRICS__NOT_FOUND)
609                         return ret;
610         }
611         return PMU_METRICS__NOT_FOUND;
612 }
613 
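/*
 * Find the pmu_events_map for a CPU via its cpuid string. Both the result for
 * the last CPU and the last cpuid string lookup are cached to avoid repeated
 * cpuid reads and table scans.
 */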
614 static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
615 {
616         static struct {
617                 const struct pmu_events_map *map;
618                 struct perf_cpu cpu;
619         } last_result;
620         static struct {
621                 const struct pmu_events_map *map;
622                 char *cpuid;
623         } last_map_search;
624         static bool has_last_result, has_last_map_search;
625         const struct pmu_events_map *map = NULL;
626         char *cpuid = NULL;
627         size_t i;
628 
629         if (has_last_result && last_result.cpu.cpu == cpu.cpu)
630                 return last_result.map;
631 
632         cpuid = get_cpuid_allow_env_override(cpu);
633 
634         /*
635          * On some platforms which use a cpus map, cpuid can be NULL for
636          * PMUs other than CORE PMUs.
637          */
638         if (!cpuid)
639                 goto out_update_last_result;
640 
641         if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
642                 map = last_map_search.map;
643                 free(cpuid);
644         } else {
645                 i = 0;
646                 for (;;) {
647                         map = &pmu_events_map[i++];
648 
649                         if (!map->arch) {
650                                 map = NULL;
651                                 break;
652                         }
653 
654                         if (!strcmp_cpuid_str(map->cpuid, cpuid))
655                                 break;
656                }
657                free(last_map_search.cpuid);
658                last_map_search.cpuid = cpuid;
659                last_map_search.map = map;
660                has_last_map_search = true;
661         }
662 out_update_last_result:
663         last_result.cpu = cpu;
664         last_result.map = map;
665         has_last_result = true;
666         return map;
667 }
668 
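/*
 * If the PMU is one of the common software/tool PMUs, return the "common"
 * map; otherwise map via the first (lowest numbered) CPU of the PMU.
 */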
669 static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
670 {
671         struct perf_cpu cpu = {-1};
672 
673         if (pmu) {
674                 for (size_t i = 0; i < ARRAY_SIZE(pmu_events__common); i++) {
675                         const char *pmu_name = &big_c_string[pmu_events__common[i].pmu_name.offset];
676 
677                         if (!strcmp(pmu_name, pmu->name)) {
678                                 const struct pmu_events_map *map = &pmu_events_map[0];
679 
680                                 while (strcmp("common", map->arch))
681                                         map++;
682                                 return map;
683                         }
684                 }
685                 cpu = perf_cpu_map__min(pmu->cpus);
686         }
687         return map_for_cpu(cpu);
688 }
689 
690 const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
691 {
692         const struct pmu_events_map *map = map_for_pmu(pmu);
693 
694         if (!map)
695                 return NULL;
696 
697         if (!pmu)
698                 return &map->event_table;
699 
700         for (size_t i = 0; i < map->event_table.num_pmus; i++) {
701                 const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
702                 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
703 
704                 if (perf_pmu__name_wildcard_match(pmu, pmu_name))
705                          return &map->event_table;
706         }
707         return NULL;
708 }
709 
710 const struct pmu_metrics_table *pmu_metrics_table__find(void)
711 {
712         struct perf_cpu cpu = {-1};
713         const struct pmu_events_map *map = map_for_cpu(cpu);
714 
715         return map ? &map->metric_table : NULL;
716 }
717 
718 const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
719 {
720         for (const struct pmu_events_map *tables = &pmu_events_map[0];
721              tables->arch;
722              tables++) {
723                 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
724                         return &tables->event_table;
725         }
726         return NULL;
727 }
728 
729 const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
730 {
731         for (const struct pmu_events_map *tables = &pmu_events_map[0];
732              tables->arch;
733              tables++) {
734                 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
735                         return &tables->metric_table;
736         }
737         return NULL;
738 }
739 
740 int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
741 {
742         for (const struct pmu_events_map *tables = &pmu_events_map[0];
743              tables->arch;
744              tables++) {
745                 int ret = pmu_events_table__for_each_event(&tables->event_table,
746                                                            /*pmu=*/ NULL, fn, data);
747 
748                 if (ret)
749                         return ret;
750         }
751         return 0;
752 }
753 
754 int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
755 {
756         for (const struct pmu_events_map *tables = &pmu_events_map[0];
757              tables->arch;
758              tables++) {
759                 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
760 
761                 if (ret)
762                         return ret;
763         }
764         return 0;
765 }
766 
767 const struct pmu_events_table *find_sys_events_table(const char *name)
768 {
769         for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
770              tables->name;
771              tables++) {
772                 if (!strcmp(tables->name, name))
773                         return &tables->event_table;
774         }
775         return NULL;
776 }
777 
778 int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
779 {
780         for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
781              tables->name;
782              tables++) {
783                 int ret = pmu_events_table__for_each_event(&tables->event_table,
784                                                            /*pmu=*/ NULL, fn, data);
785 
786                 if (ret)
787                         return ret;
788         }
789         return 0;
790 }
791 
792 int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
793 {
794         for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
795              tables->name;
796              tables++) {
797                 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
798 
799                 if (ret)
800                         return ret;
801         }
802         return 0;
803 }
804 
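/*
 * Pairs of big_c_string offsets: { metric group name, description }, sorted by
 * group name for the binary search in describe_metricgroup(). Empty in this
 * arch=none test build.
 */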
805 static const int metricgroups[][2] = {
806 
807 };
808 
809 const char *describe_metricgroup(const char *group)
810 {
811         int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
812 
813         while (low <= high) {
814                 int mid = (low + high) / 2;
815                 const char *mgroup = &big_c_string[metricgroups[mid][0]];
816                 int cmp = strcmp(mgroup, group);
817 
818                 if (cmp == 0) {
819                         return &big_c_string[metricgroups[mid][1]];
820                 } else if (cmp < 0) {
821                         low = mid + 1;
822                 } else {
823                         high = mid - 1;
824                 }
825         }
826         return NULL;
827 }
828