#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"

enum {
	CTX_BIT_USER	= 1 << 0,
	CTX_BIT_KERNEL	= 1 << 1,
	CTX_BIT_HV	= 1 << 2,
	CTX_BIT_HOST	= 1 << 3,
	CTX_BIT_IDLE	= 1 << 4,
	CTX_BIT_MAX	= 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
static bool have_frontend_stalled;

struct stats walltime_nsecs_stats;

void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
}

static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

void perf_stat__reset_shadow_stats(void)
{
	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(runtime_cycles_in_tx_stats, 0,
			sizeof(runtime_cycles_in_tx_stats));
	memset(runtime_transaction_stats, 0,
		sizeof(runtime_transaction_stats));
	memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
				    int cpu)
{
	int ctx = evsel_context(counter);
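
	/*
	 * Stats are kept per exclusion-mask context (evsel_context()) and
	 * per CPU, so e.g. a cycles event restricted to user space does not
	 * feed the baseline used by a kernel-only event's ratios.
	 */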
	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
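
/*
 * The print_* helpers below share a pattern: compare the counter average
 * against the matching runtime_* baseline for the same ctx/cpu slot, turn
 * it into a percentage, and let get_ratio_color() pick the colour.
 */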
static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel
					  __maybe_unused, double avg,
					  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel
					 __maybe_unused, double avg,
					 struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel __maybe_unused,
				double avg,
				struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_branches_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg,
				   struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg,
				    struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel __maybe_unused,
				  double avg,
				  struct perf_stat_output_ctx *out)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}

void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	int ctx = evsel_context(evsel);
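
	/*
	 * Pick the derived metric for this event: IPC for instructions, miss
	 * ratios for branches/caches/TLBs, GHz for cycles, transaction stats,
	 * and so on.  Anything unrecognized falls back to a generic M/sec
	 * (or K/sec) rate against the task-clock baseline, or prints nothing
	 * if no such baseline was recorded.
	 */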
	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%7.2f ",
					"insn per cycle", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
		}
		total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));

		if (total && avg) {
			out->new_line(ctxp);
			ratio = total / avg;
			print_metric(ctxp, NULL, "%7.2f ",
					"stalled cycles per insn",
					ratio);
		} else if (have_frontend_stalled) {
			print_metric(ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_branches_stats[ctx][cpu].n != 0)
			print_branch_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
			print_l1_dcache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_l1_icache_stats[ctx][cpu].n != 0)
			print_l1_icache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
			print_dtlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
			print_itlb_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
		if (runtime_ll_cache_stats[ctx][cpu].n != 0)
			print_ll_cache_misses(cpu, evsel, avg, out);
		else
			print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_cacherefs_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg, out);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		if (total)
			print_metric(ctxp, NULL,
					"%7.2f%%", "transactional cycles",
					100.0 * (avg / total));
		else
			print_metric(ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
		total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
				100.0 * ((total2-avg) / total));
		else
			print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
			print_metric(ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);

		if (avg)
			ratio = total / avg;

		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / ratio);
		else
			print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else {
		print_metric(ctxp, NULL, NULL, NULL, 0);
	}
}