// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
			    int nr_samples, const char *fmt, int len,
			    hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * val / total;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
		double avg = nr_samples ? (1.0 * val / nr_samples) : 0;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
	}

	return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
}

struct hpp_fmt_value {
	struct hists *hists;
	u64 val;
	int samples;
};

static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	struct evsel *pos;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int i, nr_members = 1;
	struct hpp_fmt_value *values;

	if (evsel__is_group_event(evsel))
		nr_members = evsel->core.nr_members;

	values = calloc(nr_members, sizeof(*values));
	if (values == NULL)
		return 0;

	i = 0;
	for_each_group_evsel(pos, evsel)
		values[i++].hists = evsel__hists(pos);

	values[0].val = get_field(he);
	values[0].samples = he->stat.nr_events;

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			for (i = 0; i < nr_members; i++) {
				if (values[i].hists != pair->hists)
					continue;

				values[i].val = get_field(pair);
				values[i].samples = pair->stat.nr_events;
				break;
			}
		}
	}

	for (i = 0; i < nr_members; i++) {
		if (symbol_conf.skip_empty &&
		    values[i].hists->stats.nr_samples == 0)
			continue;

		ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
					values[i].samples, fmt, len,
					print_fn, fmtype);
	}

	free(values);

	/*
	 * Restore the original buf and size as that's where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}
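/*
 * hpp__fmt() is the common helper behind the per-column ->entry/->color
 * callbacks generated below.  A minimal usage sketch (the call shown is
 * illustrative, not a callback from this file): a percent-type column
 * typically renders itself via
 *
 *	hpp__fmt(fmt, hpp, he, he_get_period, " %*.2f%%",
 *		 hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);
 *
 * printing e.g. " 12.34%", with one field per member when event groups
 * are displayed.
 */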
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn,
	     enum perf_hpp_fmt_type fmtype)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmtype);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn,
		 enum perf_hpp_fmt_type fmtype)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}
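/*
 * Group sorting overview: with symbol_conf.group_sort_idx set (the
 * --group-sort-idx option), entries are compared by that group member's
 * value first, the remaining members only breaking ties; otherwise
 * __hpp__sort() below compares the group leader first and falls back to
 * the other members in index order.
 */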
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group) {
		int nr = 0;
		struct evsel *pos;

		for_each_group_evsel(pos, evsel) {
			if (!symbol_conf.skip_empty ||
			    evsel__hists(pos)->stats.nr_samples)
				nr++;
		}

		len = max(len, nr * fmt->len);
	}

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}
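/*
 * The macros below stamp out the per-column callbacks.  For example,
 * HPP_PERCENT_FNS(overhead, period) expands to he_get_period(),
 * hpp__color_overhead(), hpp__entry_overhead() and hpp__sort_overhead(),
 * which render he->stat.period as a percentage of hists__total_period().
 */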
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW);		\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define __HPP_ENTRY_AVERAGE_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE);	\
}

#define __HPP_SORT_AVERAGE_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)
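/*
 * The _ACC_ variants read from he->stat_acc, i.e. the cumulative
 * (children) overhead used when symbol_conf.cumulate_callchain is set,
 * instead of the entry's own he->stat.
 */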
#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

#define HPP_AVERAGE_FNS(_type, _field)		\
__HPP_ENTRY_AVERAGE_FN(_type, _field)		\
__HPP_SORT_AVERAGE_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD),
	HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
	HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
	HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
};
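/*
 * Global list of output fields and sort keys; both lists start empty
 * (with a single header line) and are populated at startup by
 * perf_hpp__init() and the sort setup code.
 */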
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
#undef __HPP_SORT_AVERAGE_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to
	 * set up the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}
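/*
 * The two helpers below keep output fields and sort keys consistent in
 * both directions: sort keys are appended to the output field list, and
 * output fields are appended to the sort key list, with duplicates
 * skipped via ->equal().
 */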
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	case PERF_HPP__WEIGHT1:
	case PERF_HPP__WEIGHT2:
	case PERF_HPP__WEIGHT3:
		fmt->len = 8;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}
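/*
 * Parses a comma-separated width list (e.g. "10,8,20"), assigning each
 * value as the user_len of the next registered column in order; parsing
 * stops after the first entry not followed by a comma.
 */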
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}