// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

/*
 * Invoke a printf-style print callback on @hpp and advance the hpp
 * buffer/size cursor by the number of characters it reported written.
 * Evaluates to that count so callers can accumulate column widths.
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

/*
 * Format one value column for @he into hpp->buf.
 *
 * The value is obtained via @get_field and printed either as a raw
 * number or, when @fmt_percent is set, as a percentage of the hists'
 * total period.  For group events every group member gets its own
 * column: paired entries are walked in group-index order and any
 * member without a sample (gaps in the middle, or trailing members)
 * is zero-filled.
 *
 * hpp->buf/hpp->size are restored before returning, so the caller sees
 * the result at the original buffer position.  Returns the total number
 * of characters printed.
 */
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		/* guard against division by zero when there is no period */
		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		/* capture before 'evsel' is repointed at pair members below */
		int nr_members = evsel->core.nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

/*
 * Public entry for printing one hpp column: resolves the column width
 * (user override wins over the default) and accounts for the separator
 * characters before delegating to __hpp__fmt().
 */
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	/* with an explicit field separator, width padding is irrelevant */
	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

/*
 * Like hpp__fmt() but for accumulated (cumulate-callchain) columns.
 * When cumulation is disabled the column prints "N/A" instead.
 */
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

/* Three-way comparison of two u64 field values. */
static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

/*
 * Sort two hist entries by the value @get_field extracts.  When event
 * grouping is active and the leaders compare equal, break the tie by
 * comparing the paired group members' values index by index.
 */
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	/* scatter each pair's value into its group-index slot */
	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	/* index 0 is the group leader, already compared above */
	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

/*
 * Sort helper for accumulated columns.  Compares the accumulated field
 * first; on a tie, orders entries by callchain depth so that callers
 * and callees with equal periods get a stable, meaningful order.
 * Returns 0 (no ordering) when callchain cumulation is off.
 */
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

/*
 * Column width: user override or default, widened for group events
 * (one sub-column per member) and never narrower than the header name.
 */
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

/* Print the column header (fmt->name) right-aligned to the column width. */
static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

/*
 * hpp_snprint_fn for colored percent output.  The varargs carry an int
 * field width followed by a double percent value (matching the call
 * sites in __hpp__fmt()).  The return value is clamped to the buffer
 * size so callers never advance past the end of hpp->buf.
 */
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

/*
 * hpp_snprint_fn for plain (uncolored) output; forwards the varargs
 * straight to vsnprintf and clamps the result like above.
 */
static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

/*
 * The macro families below stamp out, per column type, the field
 * accessor plus the color/entry print callbacks and the sort callback
 * that perf_hpp__format[] wires together.
 */

#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,		\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

/* instantiate the callbacks for each built-in column */
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

/* hpp columns carry no sort-key payload of their own for cmp/collapse */
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

/* true iff @a is one of the built-in hpp columns defined in this file */
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

/* two hpp columns are equal when both are built-in and share an index */
static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

/* initializer for a colored column (has a .color callback) */
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

/* initializer for a colored accumulated column */
#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

/* initializer for a plain (uncolored) column */
#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

/* the table of built-in columns, indexed by PERF_HPP__* */
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

/* global default list of output fields and sort keys */
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN


/*
 * Initialize list heads of the built-in formats and register the
 * default output columns, honoring symbol_conf toggles.  Skipped when
 * the user gave an explicit field order.
 */
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		/* with cumulation, the plain overhead column is "Self" */
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

/* append @format to the list's output fields */
void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

/* append @format to the list's sort keys */
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

/* prepend @format so it becomes the primary sort key */
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

/* drop @format from its output-field list (safe re-init via list_del_init) */
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

/*
 * Undo the cumulate-callchain column setup: remove the "Children"
 * (accumulated) column and rename the overhead column back from
 * "Self" to "Overhead".  No-op under a strict user field order.
 */
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

/* equality guarded against formats that define no .equal callback */
static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

/*
 * Make every printable sort key also an output field, skipping keys
 * already present in the output list.
 */
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

/*
 * The inverse of the above: make every output field also a sort key,
 * skipping fields already in the sort list.
 */
void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


/* release a format via its destructor, asserting it is fully unlinked */
static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

/*
 * Unlink and free everything on both the output-field and sort-key
 * lists.  Formats on both lists are unlinked from both before freeing,
 * so each is freed exactly once.
 */
void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		/* two spaces between columns */
		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

/*
 * Total width of the leading overhead columns, i.e. everything up to
 * the first sort or dynamic entry.
 */
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

/* restore a column's default width (dispatching sort entries elsewhere) */
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

/* reset widths of all columns, including per-hierarchy-level lists */
void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

/*
 * Apply user-supplied column widths given as a comma-separated list of
 * integers, assigned to output fields in order.  NOTE(review): strtol
 * failure yields width 0 for that column; parsing stops at the first
 * character that is neither part of a number nor a comma.
 */
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

/*
 * Add a copy of @fmt to the hists' per-level hierarchy format list,
 * creating the level node on first use.  A level is shown unless every
 * format added to it is skippable.  Returns 0 on success, -1 on OOM.
 */
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	/* one non-skipped format makes the whole level visible */
	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

/*
 * For hierarchy (--hierarchy) reports, build per-hists format lists
 * from the sort keys of @list for every evsel in @evlist.  Dynamic
 * entries not defined for a given hists are skipped.  Returns 0 on
 * success or a negative error from add_hierarchy_fmt().
 */
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}