// SPDX-License-Identifier: GPL-2.0
#include <math.h>
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "debug.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
#include "units.h"
#include <linux/zalloc.h>
#include "iostat.h"
#include "util/hashmap.h"
#include "tool_pmu.h"

struct stats walltime_nsecs_stats;
struct rusage_stats ru_stats;

enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

enum stat_type {
	STAT_NONE = 0,
	STAT_NSECS,
	STAT_CYCLES,
	STAT_INSTRUCTIONS,
	STAT_STALLED_CYCLES_FRONT,
	STAT_STALLED_CYCLES_BACK,
	STAT_BRANCHES,
	STAT_BRANCH_MISS,
	STAT_CACHE_REFS,
	STAT_CACHE_MISSES,
	STAT_L1_DCACHE,
	STAT_L1_ICACHE,
	STAT_LL_CACHE,
	STAT_ITLB_CACHE,
	STAT_DTLB_CACHE,
	STAT_L1D_MISS,
	STAT_L1I_MISS,
	STAT_LL_MISS,
	STAT_DTLB_MISS,
	STAT_ITLB_MISS,
	STAT_MAX
};

static int evsel_context(const struct evsel *evsel)
{
	int ctx = 0;

	if (evsel->core.attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->core.attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->core.attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->core.attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->core.attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

void perf_stat__reset_shadow_stats(void)
{
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(&ru_stats, 0, sizeof(ru_stats));
}

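/*
 * Map an evsel to the shadow stat type it feeds: clock events map to
 * STAT_NSECS, the generic hardware and hw-cache events map to their
 * corresponding STAT_* values, and anything else maps to STAT_NONE.
 */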
static enum stat_type evsel__stat_type(struct evsel *evsel)
{
	/* Fake perf_hw_cache_op_id values for use with evsel__match. */
	u64 PERF_COUNT_hw_cache_l1d_miss = PERF_COUNT_HW_CACHE_L1D |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_l1i_miss = PERF_COUNT_HW_CACHE_L1I |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_ll_miss = PERF_COUNT_HW_CACHE_LL |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_dtlb_miss = PERF_COUNT_HW_CACHE_DTLB |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_itlb_miss = PERF_COUNT_HW_CACHE_ITLB |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);

	if (evsel__is_clock(evsel))
		return STAT_NSECS;
	else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
		return STAT_CYCLES;
	else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS))
		return STAT_INSTRUCTIONS;
	else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		return STAT_STALLED_CYCLES_FRONT;
	else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		return STAT_STALLED_CYCLES_BACK;
	else if (evsel__match(evsel, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		return STAT_BRANCHES;
	else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES))
		return STAT_BRANCH_MISS;
	else if (evsel__match(evsel, HARDWARE, HW_CACHE_REFERENCES))
		return STAT_CACHE_REFS;
	else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES))
		return STAT_CACHE_MISSES;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1D))
		return STAT_L1_DCACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1I))
		return STAT_L1_ICACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_LL))
		return STAT_LL_CACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_DTLB))
		return STAT_DTLB_CACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_ITLB))
		return STAT_ITLB_CACHE;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_l1d_miss))
		return STAT_L1D_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_l1i_miss))
		return STAT_L1I_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_ll_miss))
		return STAT_LL_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_dtlb_miss))
		return STAT_DTLB_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_itlb_miss))
		return STAT_ITLB_MISS;
	return STAT_NONE;
}

static enum metric_threshold_classify get_ratio_thresh(const double ratios[3], double val)
{
	assert(ratios[0] > ratios[1]);
	assert(ratios[1] > ratios[2]);

	return val > ratios[1]
		? (val > ratios[0] ? METRIC_THRESHOLD_BAD : METRIC_THRESHOLD_NEARLY_BAD)
		: (val > ratios[2] ? METRIC_THRESHOLD_LESS_GOOD : METRIC_THRESHOLD_GOOD);
}

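/*
 * Search @evsel's evlist for another counter of the given stat @type that
 * shares its modifiers, group, cgroup and (except for the clock events)
 * PMU, and return that counter's aggregated count for @aggr_idx, scaled
 * unless it is a clock count. Returns 0.0 when no such counter is found.
 */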
static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type type)
{
	struct evsel *cur;
	int evsel_ctx = evsel_context(evsel);

	evlist__for_each_entry(evsel->evlist, cur) {
		struct perf_stat_aggr *aggr;

		/* Ignore the evsel that is being searched from. */
		if (evsel == cur)
			continue;

		/* Ignore evsels that are part of different groups. */
		if (evsel->core.leader->nr_members > 1 &&
		    evsel->core.leader != cur->core.leader)
			continue;
		/* Ignore evsels with mismatched modifiers. */
		if (evsel_ctx != evsel_context(cur))
			continue;
		/* Ignore if not the cgroup we're looking for. */
		if (evsel->cgrp != cur->cgrp)
			continue;
		/* Ignore if not the stat we're looking for. */
		if (type != evsel__stat_type(cur))
			continue;

		/*
		 * Except for the SW CLOCK events,
		 * ignore if not the PMU we're looking for.
		 */
		if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu))
			continue;

		aggr = &cur->stats->aggr[aggr_idx];
		if (type == STAT_NSECS)
			return aggr->counts.val;
		return aggr->counts.val * cur->scale;
	}
	return 0.0;
}

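/*
 * Print @numerator as a percentage of the @denominator_type counter found
 * via find_stat(), classified against @thresh_ratios. An empty metric is
 * printed when either value is zero.
 */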
static void print_ratio(struct perf_stat_config *config,
			const struct evsel *evsel, int aggr_idx,
			double numerator, struct perf_stat_output_ctx *out,
			enum stat_type denominator_type,
			const double thresh_ratios[3], const char *_unit)
{
	double denominator = find_stat(evsel, aggr_idx, denominator_type);
	double ratio = 0;
	enum metric_threshold_classify thresh = METRIC_THRESHOLD_UNKNOWN;
	const char *fmt = NULL;
	const char *unit = NULL;

	if (numerator && denominator) {
		ratio = numerator / denominator * 100.0;
		thresh = get_ratio_thresh(thresh_ratios, ratio);
		fmt = "%7.2f%%";
		unit = _unit;
	}
	out->print_metric(config, out->ctx, thresh, fmt, unit, ratio);
}

static void print_stalled_cycles_front(struct perf_stat_config *config,
				       const struct evsel *evsel,
				       int aggr_idx, double stalled,
				       struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {50.0, 30.0, 10.0};

	print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, thresh_ratios,
		    "frontend cycles idle");
}

static void print_stalled_cycles_back(struct perf_stat_config *config,
				      const struct evsel *evsel,
				      int aggr_idx, double stalled,
				      struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {75.0, 50.0, 20.0};

	print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, thresh_ratios,
		    "backend cycles idle");
}

static void print_branch_miss(struct perf_stat_config *config,
			      const struct evsel *evsel,
			      int aggr_idx, double misses,
			      struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_BRANCHES, thresh_ratios,
		    "of all branches");
}

static void print_l1d_miss(struct perf_stat_config *config,
			   const struct evsel *evsel,
			   int aggr_idx, double misses,
			   struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_DCACHE, thresh_ratios,
		    "of all L1-dcache accesses");
}

static void print_l1i_miss(struct perf_stat_config *config,
			   const struct evsel *evsel,
			   int aggr_idx, double misses,
			   struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_ICACHE, thresh_ratios,
		    "of all L1-icache accesses");
}

static void print_ll_miss(struct perf_stat_config *config,
			  const struct evsel *evsel,
			  int aggr_idx, double misses,
			  struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, thresh_ratios,
		    "of all LL-cache accesses");
}

static void print_dtlb_miss(struct perf_stat_config *config,
			    const struct evsel *evsel,
			    int aggr_idx, double misses,
			    struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_DTLB_CACHE, thresh_ratios,
		    "of all dTLB cache accesses");
}

static void print_itlb_miss(struct perf_stat_config *config,
			    const struct evsel *evsel,
			    int aggr_idx, double misses,
			    struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_ITLB_CACHE, thresh_ratios,
		    "of all iTLB cache accesses");
}

static void print_cache_miss(struct perf_stat_config *config,
			     const struct evsel *evsel,
			     int aggr_idx, double misses,
			     struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_CACHE_REFS, thresh_ratios,
		    "of all cache refs");
}

static void print_instructions(struct perf_stat_config *config,
			       const struct evsel *evsel,
			       int aggr_idx, double instructions,
			       struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	void *ctxp = out->ctx;
	double cycles = find_stat(evsel, aggr_idx, STAT_CYCLES);
	double max_stalled = max(find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_FRONT),
				 find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_BACK));

	if (cycles) {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
			     "insn per cycle", instructions / cycles);
	} else {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
			     "insn per cycle", 0);
	}
	if (max_stalled && instructions) {
		out->new_line(config, ctxp);
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
			     "stalled cycles per insn", max_stalled / instructions);
	}
}

static void print_cycles(struct perf_stat_config *config,
			 const struct evsel *evsel,
			 int aggr_idx, double cycles,
			 struct perf_stat_output_ctx *out)
{
	double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);

	if (cycles && nsecs) {
		double ratio = cycles / nsecs;

		out->print_metric(config, out->ctx, METRIC_THRESHOLD_UNKNOWN, "%8.3f",
				  "GHz", ratio);
	} else {
		out->print_metric(config, out->ctx, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
				  "GHz", 0);
	}
}

static void print_nsecs(struct perf_stat_config *config,
			const struct evsel *evsel,
			int aggr_idx __maybe_unused, double nsecs,
			struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	void *ctxp = out->ctx;
	double wall_time = avg_stats(&walltime_nsecs_stats);

	if (wall_time) {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%8.3f", "CPUs utilized",
			     nsecs / (wall_time * evsel->scale));
	} else {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
			     "CPUs utilized", 0);
	}
}

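/*
 * Add the value and source count of each event used by @mexp to @pctx,
 * plus any referenced metrics. Tool events are read from the global
 * wall-clock/rusage stats, other events from their per-aggregation counts
 * (NAN for unsupported events). Returns the number of events added or a
 * negative error code.
 */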
static int prepare_metric(const struct metric_expr *mexp,
			  const struct evsel *evsel,
			  struct expr_parse_ctx *pctx,
			  int aggr_idx)
{
	struct evsel * const *metric_events = mexp->metric_events;
	struct metric_ref *metric_refs = mexp->metric_refs;
	int i;

	for (i = 0; metric_events[i]; i++) {
		char *n;
		double val;
		int source_count = 0;

		if (evsel__is_tool(metric_events[i])) {
			struct stats *stats;
			double scale;

			switch (evsel__tool_event(metric_events[i])) {
			case TOOL_PMU__EVENT_DURATION_TIME:
				stats = &walltime_nsecs_stats;
				scale = 1e-9;
				break;
			case TOOL_PMU__EVENT_USER_TIME:
				stats = &ru_stats.ru_utime_usec_stat;
				scale = 1e-6;
				break;
			case TOOL_PMU__EVENT_SYSTEM_TIME:
				stats = &ru_stats.ru_stime_usec_stat;
				scale = 1e-6;
				break;
			case TOOL_PMU__EVENT_NONE:
				pr_err("Invalid tool event 'none'");
				abort();
			case TOOL_PMU__EVENT_MAX:
				pr_err("Invalid tool event 'max'");
				abort();
			case TOOL_PMU__EVENT_HAS_PMEM:
			case TOOL_PMU__EVENT_NUM_CORES:
			case TOOL_PMU__EVENT_NUM_CPUS:
			case TOOL_PMU__EVENT_NUM_CPUS_ONLINE:
			case TOOL_PMU__EVENT_NUM_DIES:
			case TOOL_PMU__EVENT_NUM_PACKAGES:
			case TOOL_PMU__EVENT_SLOTS:
			case TOOL_PMU__EVENT_SMT_ON:
			case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
			default:
				pr_err("Unexpected tool event '%s'", evsel__name(metric_events[i]));
				abort();
			}
			val = avg_stats(stats) * scale;
			source_count = 1;
		} else {
			struct perf_stat_evsel *ps = metric_events[i]->stats;
			struct perf_stat_aggr *aggr;

			/*
			 * If there are multiple uncore PMUs and we're not
			 * reading the leader's stats, determine the stats for
			 * the appropriate uncore PMU.
			 */
			if (evsel && evsel->metric_leader &&
			    evsel->pmu != evsel->metric_leader->pmu &&
			    mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) {
				struct evsel *pos;

				evlist__for_each_entry(evsel->evlist, pos) {
					if (pos->pmu != evsel->pmu)
						continue;
					if (pos->metric_leader != mexp->metric_events[i])
						continue;
					ps = pos->stats;
					source_count = 1;
					break;
				}
			}
			aggr = &ps->aggr[aggr_idx];
			if (!aggr)
				break;

			if (!metric_events[i]->supported) {
				/*
				 * Not supported events will have a count of 0,
				 * which can be confusing in a
				 * metric. Explicitly set the value to NAN. Not
				 * counted events (enable time of 0) are read as
				 * 0.
				 */
				val = NAN;
				source_count = 0;
			} else {
				val = aggr->counts.val;
				if (!source_count)
					source_count = evsel__source_count(metric_events[i]);
			}
		}
		n = strdup(evsel__metric_id(metric_events[i]));
		if (!n)
			return -ENOMEM;

		expr__add_id_val_source_count(pctx, n, val, source_count);
	}

	for (int j = 0; metric_refs && metric_refs[j].metric_name; j++) {
		int ret = expr__add_ref(pctx, &metric_refs[j]);

		if (ret)
			return ret;
	}

	return i;
}

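/*
 * Evaluate one metric expression against the counts prepared for @aggr_idx
 * and print the result: the optional threshold expression decides whether
 * the value is classified good or bad, the metric unit provides the scale,
 * and a missing event leaves the metric blank.
 */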
static void generic_metric(struct perf_stat_config *config,
			   struct metric_expr *mexp,
			   struct evsel *evsel,
			   int aggr_idx,
			   struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	const char *metric_name = mexp->metric_name;
	const char *metric_expr = mexp->metric_expr;
	const char *metric_threshold = mexp->metric_threshold;
	const char *metric_unit = mexp->metric_unit;
	struct evsel * const *metric_events = mexp->metric_events;
	int runtime = mexp->runtime;
	struct expr_parse_ctx *pctx;
	double ratio, scale, threshold;
	int i;
	void *ctxp = out->ctx;
	enum metric_threshold_classify thresh = METRIC_THRESHOLD_UNKNOWN;

	pctx = expr__ctx_new();
	if (!pctx)
		return;

	if (config->user_requested_cpu_list)
		pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
	pctx->sctx.runtime = runtime;
	pctx->sctx.system_wide = config->system_wide;
	i = prepare_metric(mexp, evsel, pctx, aggr_idx);
	if (i < 0) {
		expr__ctx_free(pctx);
		return;
	}
	if (!metric_events[i]) {
		if (expr__parse(&ratio, pctx, metric_expr) == 0) {
			char *unit;
			char metric_bf[128];

			if (metric_threshold &&
			    expr__parse(&threshold, pctx, metric_threshold) == 0 &&
			    !isnan(threshold)) {
				thresh = fpclassify(threshold) == FP_ZERO
					? METRIC_THRESHOLD_GOOD : METRIC_THRESHOLD_BAD;
			}

			if (metric_unit && metric_name) {
				if (perf_pmu__convert_scale(metric_unit,
							    &unit, &scale) >= 0) {
					ratio *= scale;
				}
				if (strstr(metric_expr, "?"))
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s_%d", unit, metric_name, runtime);
				else
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s", unit, metric_name);

				print_metric(config, ctxp, thresh, "%8.1f",
					     metric_bf, ratio);
			} else {
				print_metric(config, ctxp, thresh, "%8.2f",
					     metric_name ?
					     metric_name :
					     out->force_header ? evsel->name : "",
					     ratio);
			}
		} else {
			print_metric(config, ctxp, thresh, /*fmt=*/NULL,
				     out->force_header ?
				     (metric_name ?: evsel->name) : "", 0);
		}
	} else {
		print_metric(config, ctxp, thresh, /*fmt=*/NULL,
			     out->force_header ?
			     (metric_name ?: evsel->name) : "", 0);
	}

	expr__ctx_free(pctx);
}

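/*
 * Evaluate @mexp's metric expression for @aggr_idx and return the result
 * without printing. Returns 0.0 when the metric cannot be prepared or
 * parsed, and NAN when the parse context cannot be allocated.
 */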
double test_generic_metric(struct metric_expr *mexp, int aggr_idx)
{
	struct expr_parse_ctx *pctx;
	double ratio = 0.0;

	pctx = expr__ctx_new();
	if (!pctx)
		return NAN;

	if (prepare_metric(mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0)
		goto out;

	if (expr__parse(&ratio, pctx, mexp->metric_expr))
		ratio = 0.0;

out:
	expr__ctx_free(pctx);
	return ratio;
}

static void perf_stat__print_metricgroup_header(struct perf_stat_config *config,
						struct evsel *evsel,
						void *ctxp,
						const char *name,
						struct perf_stat_output_ctx *out)
{
	bool need_full_name = perf_pmus__num_core_pmus() > 1;
	static const char *last_name;
	static const struct perf_pmu *last_pmu;
	char full_name[64];

	/*
	 * A metricgroup may have several metric events,
	 * e.g., TopdownL1 on e-core of ADL.
	 * The name has been output by the first metric
	 * event. Only align with other metrics from
	 * different metric events.
	 */
	if (last_name && !strcmp(last_name, name)) {
		if (!need_full_name || last_pmu != evsel->pmu) {
			out->print_metricgroup_header(config, ctxp, NULL);
			return;
		}
	}

	if (need_full_name && evsel->pmu)
		scnprintf(full_name, sizeof(full_name), "%s (%s)", name, evsel->pmu->name);
	else
		scnprintf(full_name, sizeof(full_name), "%s", name);

	out->print_metricgroup_header(config, ctxp, full_name);

	last_name = name;
	last_pmu = evsel->pmu;
}

/**
 * perf_stat__print_shadow_stats_metricgroup - Print out metrics associated with the evsel
 *					       For the non-default mode, all metrics
 *					       associated with the evsel are printed.
 *					       For the default mode, only the metrics from
 *					       the same metricgroup and the name of the
 *					       metricgroup are printed. To print the metrics
 *					       from the next metricgroup (if available),
 *					       invoke the function with the corresponding
 *					       metric_expr.
 */
void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
						struct evsel *evsel,
						int aggr_idx,
						int *num,
						void *from,
						struct perf_stat_output_ctx *out,
						struct rblist *metric_events)
{
	struct metric_event *me;
	struct metric_expr *mexp = from;
	void *ctxp = out->ctx;
	bool header_printed = false;
	const char *name = NULL;

	me = metricgroup__lookup(metric_events, evsel, false);
	if (me == NULL)
		return NULL;

	if (!mexp)
		mexp = list_first_entry(&me->head, typeof(*mexp), nd);

	list_for_each_entry_from(mexp, &me->head, nd) {
		/* Print the display name of the Default metricgroup */
		if (!config->metric_only && me->is_default) {
			if (!name)
				name = mexp->default_metricgroup_name;
			/*
			 * Two or more metricgroups may share the same metric
			 * event, e.g., TopdownL1 and TopdownL2 on SPR.
			 * Return and print the prefix, e.g., noise, running
			 * for the next metricgroup.
			 */
			if (strcmp(name, mexp->default_metricgroup_name))
				return (void *)mexp;
			/* Only print the name of the metricgroup once */
			if (!header_printed) {
				header_printed = true;
				perf_stat__print_metricgroup_header(config, evsel, ctxp,
								    name, out);
			}
		}

		if ((*num)++ > 0)
			out->new_line(config, ctxp);
		generic_metric(config, mexp, evsel, aggr_idx, out);
	}

	return NULL;
}

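/*
 * Print the shadow metrics derived from @evsel's @avg count: iostat output
 * when iostat mode is active, one of the hard-coded ratio helpers when the
 * event type has one, or a generic "/sec" rate otherwise, followed by any
 * metrics attached to the evsel in @metric_events. An empty metric is
 * printed when nothing else was emitted.
 */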
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct evsel *evsel,
				   double avg, int aggr_idx,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events)
{
	typedef void (*stat_print_function_t)(struct perf_stat_config *config,
					      const struct evsel *evsel,
					      int aggr_idx, double misses,
					      struct perf_stat_output_ctx *out);
	static const stat_print_function_t stat_print_function[STAT_MAX] = {
		[STAT_INSTRUCTIONS] = print_instructions,
		[STAT_BRANCH_MISS] = print_branch_miss,
		[STAT_L1D_MISS] = print_l1d_miss,
		[STAT_L1I_MISS] = print_l1i_miss,
		[STAT_DTLB_MISS] = print_dtlb_miss,
		[STAT_ITLB_MISS] = print_itlb_miss,
		[STAT_LL_MISS] = print_ll_miss,
		[STAT_CACHE_MISSES] = print_cache_miss,
		[STAT_STALLED_CYCLES_FRONT] = print_stalled_cycles_front,
		[STAT_STALLED_CYCLES_BACK] = print_stalled_cycles_back,
		[STAT_CYCLES] = print_cycles,
		[STAT_NSECS] = print_nsecs,
	};
	print_metric_t print_metric = out->print_metric;
	void *ctxp = out->ctx;
	int num = 1;

	if (config->iostat_run) {
		iostat_print_metric(config, evsel, out);
	} else {
		stat_print_function_t fn = stat_print_function[evsel__stat_type(evsel)];

		if (fn)
			fn(config, evsel, aggr_idx, avg, out);
		else {
			double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);

			if (nsecs) {
				char unit = ' ';
				char unit_buf[10] = "/sec";
				double ratio = convert_unit_double(1000000000.0 * avg / nsecs,
								   &unit);

				if (unit != ' ')
					snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
				print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%8.3f",
					     unit_buf, ratio);
			} else {
				num = 0;
			}
		}
	}

	perf_stat__print_shadow_stats_metricgroup(config, evsel, aggr_idx,
						  &num, NULL, out, metric_events);

	if (num == 0) {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN,
			     /*fmt=*/NULL, /*unit=*/NULL, 0);
	}
}

/**
 * perf_stat__skip_metric_event - Skip the evsel in the Default metricgroup,
 *				  if it's not running or not the metric event.
 */
bool perf_stat__skip_metric_event(struct evsel *evsel,
				  struct rblist *metric_events,
				  u64 ena, u64 run)
{
	if (!evsel->default_metricgroup)
		return false;

	if (!ena || !run)
		return true;

	return !metricgroup__lookup(metric_events, evsel, false);
}