// SPDX-License-Identifier: GPL-2.0
#include <math.h>
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "debug.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
#include "units.h"
#include <linux/zalloc.h>
#include "iostat.h"
#include "util/hashmap.h"
#include "tool_pmu.h"

struct stats walltime_nsecs_stats;
struct rusage_stats ru_stats;

enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

enum stat_type {
	STAT_NONE = 0,
	STAT_NSECS,
	STAT_CYCLES,
	STAT_INSTRUCTIONS,
	STAT_STALLED_CYCLES_FRONT,
	STAT_STALLED_CYCLES_BACK,
	STAT_BRANCHES,
	STAT_BRANCH_MISS,
	STAT_CACHE_REFS,
	STAT_CACHE_MISSES,
	STAT_L1_DCACHE,
	STAT_L1_ICACHE,
	STAT_LL_CACHE,
	STAT_ITLB_CACHE,
	STAT_DTLB_CACHE,
	STAT_L1D_MISS,
	STAT_L1I_MISS,
	STAT_LL_MISS,
	STAT_DTLB_MISS,
	STAT_ITLB_MISS,
	STAT_MAX
};

static int evsel_context(const struct evsel *evsel)
{
	int ctx = 0;

	if (evsel->core.attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->core.attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->core.attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->core.attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->core.attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

void perf_stat__reset_shadow_stats(void)
{
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(&ru_stats, 0, sizeof(ru_stats));
}

static enum stat_type evsel__stat_type(struct evsel *evsel)
{
	/* Fake perf_hw_cache_op_id values for use with evsel__match. */
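	/*
	 * Each fake value uses the perf_event_attr::config encoding for
	 * HW_CACHE events: (cache_id) | (op << 8) | (result << 16).
	 */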
	u64 PERF_COUNT_hw_cache_l1d_miss = PERF_COUNT_HW_CACHE_L1D |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_l1i_miss = PERF_COUNT_HW_CACHE_L1I |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_ll_miss = PERF_COUNT_HW_CACHE_LL |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_dtlb_miss = PERF_COUNT_HW_CACHE_DTLB |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);
	u64 PERF_COUNT_hw_cache_itlb_miss = PERF_COUNT_HW_CACHE_ITLB |
		((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
		((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16);

	if (evsel__is_clock(evsel))
		return STAT_NSECS;
	else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
		return STAT_CYCLES;
	else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS))
		return STAT_INSTRUCTIONS;
	else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		return STAT_STALLED_CYCLES_FRONT;
	else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		return STAT_STALLED_CYCLES_BACK;
	else if (evsel__match(evsel, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		return STAT_BRANCHES;
	else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES))
		return STAT_BRANCH_MISS;
	else if (evsel__match(evsel, HARDWARE, HW_CACHE_REFERENCES))
		return STAT_CACHE_REFS;
	else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES))
		return STAT_CACHE_MISSES;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1D))
		return STAT_L1_DCACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_L1I))
		return STAT_L1_ICACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_LL))
		return STAT_LL_CACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_DTLB))
		return STAT_DTLB_CACHE;
	else if (evsel__match(evsel, HW_CACHE, HW_CACHE_ITLB))
		return STAT_ITLB_CACHE;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_l1d_miss))
		return STAT_L1D_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_l1i_miss))
		return STAT_L1I_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_ll_miss))
		return STAT_LL_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_dtlb_miss))
		return STAT_DTLB_MISS;
	else if (evsel__match(evsel, HW_CACHE, hw_cache_itlb_miss))
		return STAT_ITLB_MISS;
	return STAT_NONE;
}

static enum metric_threshold_classify get_ratio_thresh(const double ratios[3], double val)
{
	assert(ratios[0] > ratios[1]);
	assert(ratios[1] > ratios[2]);

	return val > ratios[1]
		? (val > ratios[0] ? METRIC_THRESHOLD_BAD : METRIC_THRESHOLD_NEARLY_BAD)
		: (val > ratios[2] ? METRIC_THRESHOLD_LESS_GOOD : METRIC_THRESHOLD_GOOD);
}

static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type type)
{
	struct evsel *cur;
	int evsel_ctx = evsel_context(evsel);
	struct perf_pmu *evsel_pmu = evsel__find_pmu(evsel);

	evlist__for_each_entry(evsel->evlist, cur) {
		struct perf_stat_aggr *aggr;

		/* Ignore the evsel that is being searched from. */
		if (evsel == cur)
			continue;

		/* Ignore evsels that are part of different groups. */
		if (evsel->core.leader->nr_members > 1 &&
		    evsel->core.leader != cur->core.leader)
			continue;
		/* Ignore evsels with mismatched modifiers. */
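		/*
		 * The modifiers compared here are the exclude_{user,kernel,
		 * hv,host,idle} bits gathered by evsel_context() above.
		 */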
		if (evsel_ctx != evsel_context(cur))
			continue;
		/* Ignore if not the cgroup we're looking for. */
		if (evsel->cgrp != cur->cgrp)
			continue;
		/* Ignore if not the stat we're looking for. */
		if (type != evsel__stat_type(cur))
			continue;

		/*
		 * Except the SW CLOCK events,
		 * ignore if not the PMU we're looking for.
		 */
		if ((type != STAT_NSECS) && (evsel_pmu != evsel__find_pmu(cur)))
			continue;

		aggr = &cur->stats->aggr[aggr_idx];
		if (type == STAT_NSECS)
			return aggr->counts.val;
		return aggr->counts.val * cur->scale;
	}
	return 0.0;
}

static void print_ratio(struct perf_stat_config *config,
			const struct evsel *evsel, int aggr_idx,
			double numerator, struct perf_stat_output_ctx *out,
			enum stat_type denominator_type,
			const double thresh_ratios[3], const char *_unit)
{
	double denominator = find_stat(evsel, aggr_idx, denominator_type);
	double ratio = 0;
	enum metric_threshold_classify thresh = METRIC_THRESHOLD_UNKNOWN;
	const char *fmt = NULL;
	const char *unit = NULL;

	if (numerator && denominator) {
		ratio = numerator / denominator * 100.0;
		thresh = get_ratio_thresh(thresh_ratios, ratio);
		fmt = "%7.2f%%";
		unit = _unit;
	}
	out->print_metric(config, out->ctx, thresh, fmt, unit, ratio);
}

static void print_stalled_cycles_front(struct perf_stat_config *config,
				       const struct evsel *evsel,
				       int aggr_idx, double stalled,
				       struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {50.0, 30.0, 10.0};

	print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, thresh_ratios,
		    "frontend cycles idle");
}

static void print_stalled_cycles_back(struct perf_stat_config *config,
				      const struct evsel *evsel,
				      int aggr_idx, double stalled,
				      struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {75.0, 50.0, 20.0};

	print_ratio(config, evsel, aggr_idx, stalled, out, STAT_CYCLES, thresh_ratios,
		    "backend cycles idle");
}

static void print_branch_miss(struct perf_stat_config *config,
			      const struct evsel *evsel,
			      int aggr_idx, double misses,
			      struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_BRANCHES, thresh_ratios,
		    "of all branches");
}

static void print_l1d_miss(struct perf_stat_config *config,
			   const struct evsel *evsel,
			   int aggr_idx, double misses,
			   struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_DCACHE, thresh_ratios,
		    "of all L1-dcache accesses");
}

static void print_l1i_miss(struct perf_stat_config *config,
			   const struct evsel *evsel,
			   int aggr_idx, double misses,
			   struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_L1_ICACHE, thresh_ratios,
		    "of all L1-icache accesses");
}

static void print_ll_miss(struct perf_stat_config *config,
			  const struct evsel *evsel,
			  int aggr_idx, double misses,
			  struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, thresh_ratios,
		    "of all LL-cache accesses");
}

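/*
 * Like the printers above, the dTLB/iTLB/cache-miss printers below report
 * misses as a percentage of the corresponding accesses and classify the
 * ratio against the shared 20%/10%/5% threshold bands via print_ratio().
 */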
static void print_dtlb_miss(struct perf_stat_config *config,
			    const struct evsel *evsel,
			    int aggr_idx, double misses,
			    struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_DTLB_CACHE, thresh_ratios,
		    "of all dTLB cache accesses");
}

static void print_itlb_miss(struct perf_stat_config *config,
			    const struct evsel *evsel,
			    int aggr_idx, double misses,
			    struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_ITLB_CACHE, thresh_ratios,
		    "of all iTLB cache accesses");
}

static void print_cache_miss(struct perf_stat_config *config,
			     const struct evsel *evsel,
			     int aggr_idx, double misses,
			     struct perf_stat_output_ctx *out)
{
	const double thresh_ratios[3] = {20.0, 10.0, 5.0};

	print_ratio(config, evsel, aggr_idx, misses, out, STAT_CACHE_REFS, thresh_ratios,
		    "of all cache refs");
}

static void print_instructions(struct perf_stat_config *config,
			       const struct evsel *evsel,
			       int aggr_idx, double instructions,
			       struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	void *ctxp = out->ctx;
	double cycles = find_stat(evsel, aggr_idx, STAT_CYCLES);
	double max_stalled = max(find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_FRONT),
				 find_stat(evsel, aggr_idx, STAT_STALLED_CYCLES_BACK));

	if (cycles) {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
			     "insn per cycle", instructions / cycles);
	} else {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
			     "insn per cycle", 0);
	}
	if (max_stalled && instructions) {
		if (out->new_line)
			out->new_line(config, ctxp);
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%7.2f ",
			     "stalled cycles per insn", max_stalled / instructions);
	}
}

static void print_cycles(struct perf_stat_config *config,
			 const struct evsel *evsel,
			 int aggr_idx, double cycles,
			 struct perf_stat_output_ctx *out)
{
	double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);

	if (cycles && nsecs) {
		double ratio = cycles / nsecs;

		out->print_metric(config, out->ctx, METRIC_THRESHOLD_UNKNOWN, "%8.3f",
				  "GHz", ratio);
	} else {
		out->print_metric(config, out->ctx, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
				  "GHz", 0);
	}
}

static void print_nsecs(struct perf_stat_config *config,
			const struct evsel *evsel,
			int aggr_idx __maybe_unused, double nsecs,
			struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	void *ctxp = out->ctx;
	double wall_time = avg_stats(&walltime_nsecs_stats);

	if (wall_time) {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%8.3f", "CPUs utilized",
			     nsecs / (wall_time * evsel->scale));
	} else {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, /*fmt=*/NULL,
			     "CPUs utilized", 0);
	}
}

static int prepare_metric(const struct metric_expr *mexp,
			  const struct evsel *evsel,
			  struct expr_parse_ctx *pctx,
			  int aggr_idx)
{
	struct evsel * const *metric_events = mexp->metric_events;
	struct metric_ref *metric_refs = mexp->metric_refs;
	int i;

	for (i = 0; metric_events[i]; i++) {
		char *n;
		double val;
		int source_count = 0;

		if (evsel__is_tool(metric_events[i])) {
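			/*
			 * Tool events are not read from a hardware counter:
			 * take their value from the internal walltime/rusage
			 * statistics and scale it to seconds.
			 */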
			struct stats *stats;
			double scale;

			switch (evsel__tool_event(metric_events[i])) {
			case TOOL_PMU__EVENT_DURATION_TIME:
				stats = &walltime_nsecs_stats;
				scale = 1e-9;
				break;
			case TOOL_PMU__EVENT_USER_TIME:
				stats = &ru_stats.ru_utime_usec_stat;
				scale = 1e-6;
				break;
			case TOOL_PMU__EVENT_SYSTEM_TIME:
				stats = &ru_stats.ru_stime_usec_stat;
				scale = 1e-6;
				break;
			case TOOL_PMU__EVENT_NONE:
				pr_err("Invalid tool event 'none'");
				abort();
			case TOOL_PMU__EVENT_MAX:
				pr_err("Invalid tool event 'max'");
				abort();
			case TOOL_PMU__EVENT_HAS_PMEM:
			case TOOL_PMU__EVENT_NUM_CORES:
			case TOOL_PMU__EVENT_NUM_CPUS:
			case TOOL_PMU__EVENT_NUM_CPUS_ONLINE:
			case TOOL_PMU__EVENT_NUM_DIES:
			case TOOL_PMU__EVENT_NUM_PACKAGES:
			case TOOL_PMU__EVENT_SLOTS:
			case TOOL_PMU__EVENT_SMT_ON:
			case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
			default:
				pr_err("Unexpected tool event '%s'", evsel__name(metric_events[i]));
				abort();
			}
			val = avg_stats(stats) * scale;
			source_count = 1;
		} else {
			struct perf_stat_evsel *ps = metric_events[i]->stats;
			struct perf_stat_aggr *aggr;

			/*
			 * If there are multiple uncore PMUs and we're not
			 * reading the leader's stats, determine the stats for
			 * the appropriate uncore PMU.
			 */
			if (evsel && evsel->metric_leader &&
			    evsel->pmu != evsel->metric_leader->pmu &&
			    mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) {
				struct evsel *pos;

				evlist__for_each_entry(evsel->evlist, pos) {
					if (pos->pmu != evsel->pmu)
						continue;
					if (pos->metric_leader != mexp->metric_events[i])
						continue;
					ps = pos->stats;
					source_count = 1;
					break;
				}
			}
			aggr = &ps->aggr[aggr_idx];
			if (!aggr)
				break;

			if (!metric_events[i]->supported) {
				/*
				 * Not supported events will have a count of 0,
				 * which can be confusing in a
				 * metric. Explicitly set the value to NAN. Not
				 * counted events (enable time of 0) are read as
				 * 0.
				 */
				val = NAN;
				source_count = 0;
			} else {
				val = aggr->counts.val;
				if (!source_count)
					source_count = evsel__source_count(metric_events[i]);
			}
		}
		n = strdup(evsel__metric_id(metric_events[i]));
		if (!n)
			return -ENOMEM;

		expr__add_id_val_source_count(pctx, n, val, source_count);
	}

	for (int j = 0; metric_refs && metric_refs[j].metric_name; j++) {
		int ret = expr__add_ref(pctx, &metric_refs[j]);

		if (ret)
			return ret;
	}

	return i;
}

static void generic_metric(struct perf_stat_config *config,
			   struct metric_expr *mexp,
			   struct evsel *evsel,
			   int aggr_idx,
			   struct perf_stat_output_ctx *out)
{
	print_metric_t print_metric = out->print_metric;
	const char *metric_name = mexp->metric_name;
	const char *metric_expr = mexp->metric_expr;
	const char *metric_threshold = mexp->metric_threshold;
	const char *metric_unit = mexp->metric_unit;
	struct evsel * const *metric_events = mexp->metric_events;
	int runtime = mexp->runtime;
	struct expr_parse_ctx *pctx;
	double ratio, scale, threshold;
	int i;
	void *ctxp = out->ctx;
	enum metric_threshold_classify thresh = METRIC_THRESHOLD_UNKNOWN;

	pctx = expr__ctx_new();
	if (!pctx)
		return;

	if (config->user_requested_cpu_list)
		pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list);
	pctx->sctx.runtime = runtime;
	pctx->sctx.system_wide = config->system_wide;
	i = prepare_metric(mexp, evsel, pctx, aggr_idx);
	if (i < 0) {
		expr__ctx_free(pctx);
		return;
	}
	if (!metric_events[i]) {
		if (expr__parse(&ratio, pctx, metric_expr) == 0) {
			char *unit;
			char metric_bf[128];

			if (metric_threshold &&
			    expr__parse(&threshold, pctx, metric_threshold) == 0 &&
			    !isnan(threshold)) {
				thresh = fpclassify(threshold) == FP_ZERO
					? METRIC_THRESHOLD_GOOD : METRIC_THRESHOLD_BAD;
			}

			if (metric_unit && metric_name) {
				if (perf_pmu__convert_scale(metric_unit,
							    &unit, &scale) >= 0) {
					ratio *= scale;
				}
				if (strstr(metric_expr, "?"))
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s_%d", unit, metric_name, runtime);
				else
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s", unit, metric_name);

				print_metric(config, ctxp, thresh, "%8.1f",
					     metric_bf, ratio);
			} else {
				print_metric(config, ctxp, thresh, "%8.2f",
					metric_name ?
					metric_name :
					out->force_header ? evsel->name : "",
					ratio);
			}
		} else {
			print_metric(config, ctxp, thresh, /*fmt=*/NULL,
				     out->force_header ?
				     (metric_name ?: evsel->name) : "", 0);
		}
	} else {
		print_metric(config, ctxp, thresh, /*fmt=*/NULL,
			     out->force_header ?
			     (metric_name ?: evsel->name) : "", 0);
	}

	expr__ctx_free(pctx);
}

double test_generic_metric(struct metric_expr *mexp, int aggr_idx)
{
	struct expr_parse_ctx *pctx;
	double ratio = 0.0;

	pctx = expr__ctx_new();
	if (!pctx)
		return NAN;

	if (prepare_metric(mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0)
		goto out;

	if (expr__parse(&ratio, pctx, mexp->metric_expr))
		ratio = 0.0;

out:
	expr__ctx_free(pctx);
	return ratio;
}

static void perf_stat__print_metricgroup_header(struct perf_stat_config *config,
						struct evsel *evsel,
						void *ctxp,
						const char *name,
						struct perf_stat_output_ctx *out)
{
	bool need_full_name = perf_pmus__num_core_pmus() > 1;
	static const char *last_name;
	static const struct perf_pmu *last_pmu;
	char full_name[64];

	/*
	 * A metricgroup may have several metric events,
	 * e.g., TopdownL1 on the e-core of ADL.
	 * The name has been output by the first metric
	 * event. Only align with other metrics from
	 * different metric events.
	 */
	if (last_name && !strcmp(last_name, name)) {
		if (!need_full_name || last_pmu != evsel->pmu) {
			out->print_metricgroup_header(config, ctxp, NULL);
			return;
		}
	}

	if (need_full_name && evsel->pmu)
		scnprintf(full_name, sizeof(full_name), "%s (%s)", name, evsel->pmu->name);
	else
		scnprintf(full_name, sizeof(full_name), "%s", name);

	out->print_metricgroup_header(config, ctxp, full_name);

	last_name = name;
	last_pmu = evsel->pmu;
}

/**
 * perf_stat__print_shadow_stats_metricgroup - Print out metrics associated with the evsel.
 * For the non-default mode, all metrics associated with the evsel are printed.
 * For the default mode, only the metrics from the same metricgroup and the
 * name of the metricgroup are printed. To print the metrics from the next
 * metricgroup (if available), invoke the function with the corresponding
 * metric_expr.
 */
void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config,
						struct evsel *evsel,
						int aggr_idx,
						int *num,
						void *from,
						struct perf_stat_output_ctx *out,
						struct rblist *metric_events)
{
	struct metric_event *me;
	struct metric_expr *mexp = from;
	void *ctxp = out->ctx;
	bool header_printed = false;
	const char *name = NULL;

	me = metricgroup__lookup(metric_events, evsel, false);
	if (me == NULL)
		return NULL;

	if (!mexp)
		mexp = list_first_entry(&me->head, typeof(*mexp), nd);

	list_for_each_entry_from(mexp, &me->head, nd) {
		/* Print the display name of the Default metricgroup */
		if (!config->metric_only && me->is_default) {
			if (!name)
				name = mexp->default_metricgroup_name;
			/*
			 * Two or more metricgroups may share the same metric
			 * event, e.g., TopdownL1 and TopdownL2 on SPR.
			 * Return and print the prefix, e.g., noise, running,
			 * for the next metricgroup.
			 */
			if (strcmp(name, mexp->default_metricgroup_name))
				return (void *)mexp;
			/* Only print the name of the metricgroup once */
			if (!header_printed) {
				header_printed = true;
				perf_stat__print_metricgroup_header(config, evsel, ctxp,
								    name, out);
			}
		}

		if ((*num)++ > 0 && out->new_line)
			out->new_line(config, ctxp);
		generic_metric(config, mexp, evsel, aggr_idx, out);
	}

	return NULL;
}

void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct evsel *evsel,
				   double avg, int aggr_idx,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events)
{
	typedef void (*stat_print_function_t)(struct perf_stat_config *config,
					      const struct evsel *evsel,
					      int aggr_idx, double misses,
					      struct perf_stat_output_ctx *out);
	static const stat_print_function_t stat_print_function[STAT_MAX] = {
		[STAT_INSTRUCTIONS] = print_instructions,
		[STAT_BRANCH_MISS] = print_branch_miss,
		[STAT_L1D_MISS] = print_l1d_miss,
		[STAT_L1I_MISS] = print_l1i_miss,
		[STAT_DTLB_MISS] = print_dtlb_miss,
		[STAT_ITLB_MISS] = print_itlb_miss,
		[STAT_LL_MISS] = print_ll_miss,
		[STAT_CACHE_MISSES] = print_cache_miss,
		[STAT_STALLED_CYCLES_FRONT] = print_stalled_cycles_front,
		[STAT_STALLED_CYCLES_BACK] = print_stalled_cycles_back,
		[STAT_CYCLES] = print_cycles,
		[STAT_NSECS] = print_nsecs,
	};
	print_metric_t print_metric = out->print_metric;
	void *ctxp = out->ctx;
	int num = 1;

	if (config->iostat_run) {
		iostat_print_metric(config, evsel, out);
	} else {
		stat_print_function_t fn = stat_print_function[evsel__stat_type(evsel)];

		if (fn)
			fn(config, evsel, aggr_idx, avg, out);
		else {
			double nsecs = find_stat(evsel, aggr_idx, STAT_NSECS);

			if (nsecs) {
				char unit = ' ';
				char unit_buf[10] = "/sec";
				double ratio = convert_unit_double(1000000000.0 * avg / nsecs,
								   &unit);

				if (unit != ' ')
					snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
				print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN, "%8.3f",
					     unit_buf, ratio);
			} else {
				num = 0;
			}
		}
	}

	perf_stat__print_shadow_stats_metricgroup(config, evsel, aggr_idx,
						  &num, NULL, out, metric_events);

	if (num == 0) {
		print_metric(config, ctxp, METRIC_THRESHOLD_UNKNOWN,
			     /*fmt=*/NULL, /*unit=*/NULL, 0);
	}
}

/**
 * perf_stat__skip_metric_event - Skip the evsel in the Default metricgroup
 * if it's not running or not the metric event.
 */
bool perf_stat__skip_metric_event(struct evsel *evsel,
				  struct rblist *metric_events,
				  u64 ena, u64 run)
{
	if (!evsel->default_metricgroup)
		return false;

	if (!ena || !run)
		return true;

	return !metricgroup__lookup(metric_events, evsel, false);
}