// SPDX-License-Identifier: GPL-2.0
/*
 * An empty pmu-events.c file used when there are no architecture json files
 * in arch or when the jevents.py script cannot be run.
 *
 * The test cpu/soc is provided for testing.
 */
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>
#include <stdlib.h> /* for free() */

static const struct pmu_event pmu_events__test_soc_cpu[] = {
	{
		.name = "l3_cache_rd",
		.event = "event=0x40",
		.desc = "L3 cache access, read",
		.topic = "cache",
		.long_desc = "Attributable Level 3 cache access, read",
	},
	{
		.name = "segment_reg_loads.any",
		.event = "event=0x6,period=200000,umask=0x80",
		.desc = "Number of segment register loads",
		.topic = "other",
	},
	{
		.name = "dispatch_blocked.any",
		.event = "event=0x9,period=200000,umask=0x20",
		.desc = "Memory cluster signals to block micro-op dispatch for any reason",
		.topic = "other",
	},
	{
		.name = "eist_trans",
		.event = "event=0x3a,period=200000,umask=0x0",
		.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
		.topic = "other",
	},
	{
		.name = "uncore_hisi_ddrc.flux_wcmd",
		.event = "event=0x2",
		.desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
		.topic = "uncore",
		.long_desc = "DDRC write commands",
		.pmu = "hisi_sccl,ddrc",
	},
	{
		.name = "unc_cbo_xsnp_response.miss_eviction",
		.event = "event=0x22,umask=0x81",
		.desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
		.pmu = "uncore_cbox",
	},
	{
		.name = "event-hyphen",
		.event = "event=0xe0,umask=0x00",
		.desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "UNC_CBO_HYPHEN",
		.pmu = "uncore_cbox",
	},
	{
		.name = "event-two-hyph",
		.event = "event=0xc0,umask=0x00",
		.desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "UNC_CBO_TWO_HYPH",
		.pmu = "uncore_cbox",
	},
	{
		.name = "uncore_hisi_l3c.rd_hit_cpipe",
		.event = "event=0x7",
		.desc = "Total read hits. Unit: hisi_sccl,l3c ",
		.topic = "uncore",
		.long_desc = "Total read hits",
		.pmu = "hisi_sccl,l3c",
	},
	{
		.name = "uncore_imc_free_running.cache_miss",
		.event = "event=0x12",
		.desc = "Total cache misses. Unit: uncore_imc_free_running ",
		.topic = "uncore",
		.long_desc = "Total cache misses",
		.pmu = "uncore_imc_free_running",
	},
	{
		.name = "uncore_imc.cache_hits",
		.event = "event=0x34",
		.desc = "Total cache hits. Unit: uncore_imc ",
		.topic = "uncore",
		.long_desc = "Total cache hits",
		.pmu = "uncore_imc",
	},
	{
		.name = "bp_l1_btb_correct",
		.event = "event=0x8a",
		.desc = "L1 BTB Correction",
		.topic = "branch",
	},
	{
		.name = "bp_l2_btb_correct",
		.event = "event=0x8b",
		.desc = "L2 BTB Correction",
		.topic = "branch",
	},
	{
		.name = 0,
		.event = 0,
		.desc = 0,
	},
};
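
/*
 * Test metrics for the test SoC. Expressions may reference events by name as
 * well as other metrics by their metric_name (CPI uses IPC, M1 and M2
 * reference each other, and M3 references itself), so the table covers both
 * plain and self/mutually-referencing metric definitions.
 */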
static const struct pmu_metric pmu_metrics__test_soc_cpu[] = {
	{
		.metric_expr = "1 / IPC",
		.metric_name = "CPI",
	},
	{
		.metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
		.metric_name = "IPC",
		.metric_group = "group1",
	},
	{
		.metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
			       "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
		.metric_name = "Frontend_Bound_SMT",
	},
	{
		.metric_expr = "l1d\\-loads\\-misses / inst_retired.any",
		.metric_name = "dcache_miss_cpi",
	},
	{
		.metric_expr = "l1i\\-loads\\-misses / inst_retired.any",
		.metric_name = "icache_miss_cycles",
	},
	{
		.metric_expr = "(dcache_miss_cpi + icache_miss_cycles)",
		.metric_name = "cache_miss_cycles",
		.metric_group = "group1",
	},
	{
		.metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
		.metric_name = "DCache_L2_All_Hits",
	},
	{
		.metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
			       "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
		.metric_name = "DCache_L2_All_Miss",
	},
	{
		.metric_expr = "DCache_L2_All_Hits + DCache_L2_All_Miss",
		.metric_name = "DCache_L2_All",
	},
	{
		.metric_expr = "d_ratio(DCache_L2_All_Hits, DCache_L2_All)",
		.metric_name = "DCache_L2_Hits",
	},
	{
		.metric_expr = "d_ratio(DCache_L2_All_Miss, DCache_L2_All)",
		.metric_name = "DCache_L2_Misses",
	},
	{
		.metric_expr = "ipc + M2",
		.metric_name = "M1",
	},
	{
		.metric_expr = "ipc + M1",
		.metric_name = "M2",
	},
	{
		.metric_expr = "1/M3",
		.metric_name = "M3",
	},
	{
		.metric_expr = "64 * l1d.replacement / 1000000000 / duration_time",
		.metric_name = "L1D_Cache_Fill_BW",
	},
	{
		.metric_expr = 0,
		.metric_name = 0,
	},
};

/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
	const struct pmu_event *entries;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
	const struct pmu_metric *entries;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the one returned by get_cpuid_str() in
 * tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
	const char *arch;
	const char *cpuid;
	const struct pmu_events_table event_table;
	const struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
static const struct pmu_events_map pmu_events_map[] = {
	{
		.arch = "testarch",
		.cpuid = "testcpu",
		.event_table = { pmu_events__test_soc_cpu },
		.metric_table = { pmu_metrics__test_soc_cpu },
	},
	{
		.arch = 0,
		.cpuid = 0,
		.event_table = { 0 },
		.metric_table = { 0 },
	},
};
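
/*
 * Test events for system-wide (uncore) PMUs. These tables are selected by
 * table name via find_sys_events_table() rather than by cpuid, and each entry
 * carries a "compat" string for matching the specific hardware it applies to.
 */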
static const struct pmu_event pmu_events__test_soc_sys[] = {
	{
		.name = "sys_ddr_pmu.write_cycles",
		.event = "event=0x2b",
		.desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
		.compat = "v8",
		.topic = "uncore",
		.pmu = "uncore_sys_ddr_pmu",
	},
	{
		.name = "sys_ccn_pmu.read_cycles",
		.event = "config=0x2c",
		.desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
		.compat = "0x01",
		.topic = "uncore",
		.pmu = "uncore_sys_ccn_pmu",
	},
	{
		.name = "sys_cmn_pmu.hnf_cache_miss",
		.event = "eventid=0x1,type=0x5",
		.desc = "Counts total cache misses in first lookup result (high priority). Unit: uncore_sys_cmn_pmu ",
		.compat = "(434|436|43c|43a).*",
		.topic = "uncore",
		.pmu = "uncore_sys_cmn_pmu",
	},
	{
		.name = 0,
		.event = 0,
		.desc = 0,
	},
};

struct pmu_sys_events {
	const char *name;
	const struct pmu_events_table table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
	{
		.table = { pmu_events__test_soc_sys },
		.name = "pmu_events__test_soc_sys",
	},
	{
		.table = { 0 }
	},
};
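
/*
 * A minimal usage sketch for the iterators below (print_event_cb is a
 * hypothetical callback, not part of this file): "fn" is invoked once per
 * matching entry and iteration stops early when it returns non-zero. Passing
 * a NULL pmu skips the PMU name filter and visits every entry.
 *
 *	static int print_event_cb(const struct pmu_event *pe,
 *				  const struct pmu_events_table *table,
 *				  void *data)
 *	{
 *		printf("%s: %s\n", pe->name, pe->event);
 *		return 0;
 *	}
 *
 *	pmu_events_table__for_each_event(table, NULL, print_event_cb, NULL);
 */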
int pmu_events_table__for_each_event(const struct pmu_events_table *table, struct perf_pmu *pmu,
				     pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		int ret;

		if (pmu && !pmu__name_match(pmu, pe->pmu))
			continue;

		ret = fn(pe, table, data);
		if (ret)
			return ret;
	}
	return 0;
}

int pmu_events_table__find_event(const struct pmu_events_table *table,
				 struct perf_pmu *pmu,
				 const char *name,
				 pmu_event_iter_fn fn,
				 void *data)
{
	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		if (pmu && !pmu__name_match(pmu, pe->pmu))
			continue;

		if (!strcasecmp(pe->name, name))
			return fn(pe, table, data);
	}
	return -1000;
}

size_t pmu_events_table__num_events(const struct pmu_events_table *table,
				    struct perf_pmu *pmu)
{
	size_t count = 0;

	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		if (pmu && !pmu__name_match(pmu, pe->pmu))
			continue;

		count++;
	}
	return count;
}

int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
				       void *data)
{
	for (const struct pmu_metric *pm = &table->entries[0]; pm->metric_expr; pm++) {
		int ret = fn(pm, table, data);

		if (ret)
			return ret;
	}
	return 0;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
	const struct pmu_events_table *table = NULL;
	char *cpuid = perf_pmu__getcpuid(pmu);
	int i;

	/* On platforms that use a cpus map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
	if (!cpuid)
		return NULL;

	i = 0;
	for (;;) {
		const struct pmu_events_map *map = &pmu_events_map[i++];

		if (!map->cpuid)
			break;

		if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
			table = &map->event_table;
			break;
		}
	}
	free(cpuid);
	return table;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
	const struct pmu_metrics_table *table = NULL;
	char *cpuid = perf_pmu__getcpuid(pmu);
	int i;

	/* On platforms that use a cpus map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
	if (!cpuid)
		return NULL;

	i = 0;
	for (;;) {
		const struct pmu_events_map *map = &pmu_events_map[i++];

		if (!map->cpuid)
			break;

		if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
			table = &map->metric_table;
			break;
		}
	}
	free(cpuid);
	return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->event_table;
	}
	return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->metric_table;
	}
	return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) {
		int ret = pmu_events_table__for_each_event(&tables->event_table,
							   /*pmu=*/ NULL, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		if (!strcmp(tables->name, name))
			return &tables->table;
	}
	return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		int ret = pmu_events_table__for_each_event(&tables->table, /*pmu=*/ NULL, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn __maybe_unused, void *data __maybe_unused)
{
	return 0;
}

const char *describe_metricgroup(const char *group __maybe_unused)
{
	return NULL;
}