#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"

enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct rblist runtime_saved_values;
static bool have_frontend_stalled;

struct stats walltime_nsecs_stats;

struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	int cpu;
	int ctx;
	struct stats stats;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;
	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	return a->evsel - b->evsel;
}

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}

static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu, int ctx,
					      bool create)
{
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.ctx = ctx,
		.evsel = evsel,
	};

	nd = rblist__find(&runtime_saved_values, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(&runtime_saved_values, &dm);
		nd = rblist__find(&runtime_saved_values, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
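	/*
	 * runtime_saved_values keeps one struct stats per (evsel, cpu, ctx)
	 * triple, keyed via saved_value_cmp() above.  Events referenced by a
	 * MetricExpr are accumulated here by perf_stat__update_shadow_stats()
	 * and looked up again when the expression is evaluated in
	 * perf_stat__print_shadow_stats().
	 */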
	rblist__init(&runtime_saved_values);
	runtime_saved_values.node_cmp = saved_value_cmp;
	runtime_saved_values.node_new = saved_value_new;
	/* No delete for now */
}

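/*
 * Map an event's exclude_{user,kernel,hv,host,idle} bits to a small context
 * index, so that counts measured in different contexts land in separate slots
 * of the shadow stats arrays and never get mixed.
 */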
static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

void perf_stat__reset_shadow_stats(void)
{
	struct rb_node *pos, *next;

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0,
	       sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0,
	       sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
	memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
	memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
	memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
	memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
	memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));

	next = rb_first(&runtime_saved_values.entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
				    int cpu)
{
	int ctx = evsel_context(counter);

	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
		update_stats(&runtime_nsecs_stats[cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_stats(&runtime_topdown_total_slots[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_stats(&runtime_topdown_slots_issued[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_stats(&runtime_topdown_slots_retired[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, ctx,
							   true);
		update_stats(&v->stats, count[0]);
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}

static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name))
			return c2;
	}
	return NULL;
}

/*
 * Mark the events that a MetricExpr refers to, and link the events that use
 * them to those target events.
 */
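/*
 * For example, an event "a" carrying the (hypothetical) expression "a / b"
 * makes expr__find_other() return { "b" }; the evsel named "b" is then stored
 * in a->metric_events[0] (NULL-terminated array) and marked with collect_stat
 * so its counts are saved for evaluation at print time.
 */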
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
					     &metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(sizeof(struct perf_evsel *),
					       num_metric_names + 1);
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i])) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		free(metric_names);
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}

static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_branches_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, "A Top-Down Method for Performance Analysis and Counters
 * Architecture", ISPASS 2014
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline, one per unit of pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, where possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
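/*
 * Worked example of the Level 1 formulas above (hypothetical numbers,
 * assuming a 4-wide pipeline, i.e. TotalSlots = Cycles * 4):
 *
 *   Cycles          = 1000  =>  TotalSlots = 4000
 *   SlotsIssued     = 2600
 *   SlotsRetired    = 2000
 *   RecoveryBubbles =  200
 *   FetchBubbles    =  800
 *
 *   BadSpeculation = ((2600 - 2000) + 200) / 4000 = 20%
 *   Retiring       =  2000 / 4000                 = 50%
 *   FrontendBound  =   800 / 4000                 = 20%
 *   BackendBound   = 1.0 - 0.2 - 0.5 - 0.2        = 10%
 *
 * The four fractions sum to 1.0 by construction; sanitize_val() below only
 * rounds small negative results caused by measurement noise back up to 0.
 */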
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int ctx, int cpu)
{
	return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
}

static double td_bad_spec(int ctx, int cpu)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
		avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
		avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
	total_slots = td_total_slots(ctx, cpu);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int ctx, int cpu)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int ctx, int cpu)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu);
	double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int ctx, int cpu)
{
	double sum = (td_fe_bound(ctx, cpu) +
		      td_bad_spec(ctx, cpu) +
		      td_retiring(ctx, cpu));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
		}
		total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));

		if (total && avg) {
			out->new_line(ctxp);
			ratio = total / avg;
			print_metric(ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		} else if (have_frontend_stalled) {
			print_metric(ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_branches_stats[ctx][cpu].n != 0)
			print_branch_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
			print_l1_dcache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_icache_stats[ctx][cpu].n != 0)
			print_l1_icache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
			print_dtlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
			print_itlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_ll_cache_stats[ctx][cpu].n != 0)
			print_ll_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_cacherefs_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total)
			print_metric(ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
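		/*
		 * avg here is the checkpointed-cycles count (CYCLES_IN_TX_CP),
		 * which should cover only committed transactions; subtracting
		 * it from the cycles-in-tx total below therefore approximates
		 * the cycles wasted in aborted transactions.
		 */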
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
				     100.0 * ((total2 - avg) / total));
		else
			print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / ratio);
		else
			print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu) > 0)
			print_metric(ctxp, color, "%8.1f%%", name,
				     be_bound * 100.);
		else
			print_metric(ctxp, NULL, NULL, name, 0);
	} else if (evsel->metric_expr) {
		struct parse_ctx pctx;
		int i;

		expr__ctx_init(&pctx);
		expr__add_id(&pctx, evsel->name, avg);
		for (i = 0; evsel->metric_events[i]; i++) {
			struct saved_value *v;

			v = saved_value_lookup(evsel->metric_events[i], cpu, ctx, false);
			if (!v)
				break;
			expr__add_id(&pctx, evsel->metric_events[i]->name,
				     avg_stats(&v->stats));
		}
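		/*
		 * Only evaluate the expression if a saved value was found for
		 * every event it references; otherwise print an empty metric.
		 */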
		if (!evsel->metric_events[i]) {
			const char *p = evsel->metric_expr;

			if (expr__parse(&ratio, &pctx, &p) == 0)
				print_metric(ctxp, NULL, "%8.1f",
					     out->force_header ?
					     evsel->name : "",
					     ratio);
			else
				print_metric(ctxp, NULL, NULL, "", 0);
		} else
			print_metric(ctxp, NULL, NULL, "", 0);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else {
		print_metric(ctxp, NULL, NULL, NULL, 0);
	}
}