1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <stdlib.h> 6 #include <linux/mman.h> 7 #include <linux/time64.h> 8 #include "debug.h" 9 #include "dso.h" 10 #include "sort.h" 11 #include "hist.h" 12 #include "cacheline.h" 13 #include "comm.h" 14 #include "map.h" 15 #include "maps.h" 16 #include "symbol.h" 17 #include "map_symbol.h" 18 #include "branch.h" 19 #include "thread.h" 20 #include "evsel.h" 21 #include "evlist.h" 22 #include "srcline.h" 23 #include "strlist.h" 24 #include "strbuf.h" 25 #include "mem-events.h" 26 #include "mem-info.h" 27 #include "annotate.h" 28 #include "annotate-data.h" 29 #include "event.h" 30 #include "time-utils.h" 31 #include "cgroup.h" 32 #include "machine.h" 33 #include "trace-event.h" 34 #include <linux/kernel.h> 35 #include <linux/string.h> 36 37 #ifdef HAVE_LIBTRACEEVENT 38 #include <event-parse.h> 39 #endif 40 41 regex_t parent_regex; 42 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 43 const char *parent_pattern = default_parent_pattern; 44 const char *default_sort_order = "comm,dso,symbol"; 45 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 46 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc"; 47 const char default_top_sort_order[] = "dso,symbol"; 48 const char default_diff_sort_order[] = "dso,symbol"; 49 const char default_tracepoint_sort_order[] = "trace"; 50 const char *sort_order; 51 const char *field_order; 52 regex_t ignore_callees_regex; 53 int have_ignore_callees = 0; 54 enum sort_mode sort__mode = SORT_MODE__NORMAL; 55 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"}; 56 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"}; 57 58 /* 59 * Some architectures have Adjacent Cacheline Prefetch feature, which 60 * 
behaves like the cacheline size is doubled. Enable this flag to 61 * check things in double cacheline granularity. 62 */ 63 bool chk_double_cl; 64 65 /* 66 * Replaces all occurrences of a char used with the: 67 * 68 * -t, --field-separator 69 * 70 * option, that uses a special separator character and don't pad with spaces, 71 * replacing all occurrences of this separator in symbol names (and other 72 * output) with a '.' character, that thus it's the only non valid separator. 73 */ 74 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 75 { 76 int n; 77 va_list ap; 78 79 va_start(ap, fmt); 80 n = vsnprintf(bf, size, fmt, ap); 81 if (symbol_conf.field_sep && n > 0) { 82 char *sep = bf; 83 84 while (1) { 85 sep = strchr(sep, *symbol_conf.field_sep); 86 if (sep == NULL) 87 break; 88 *sep = '.'; 89 } 90 } 91 va_end(ap); 92 93 if (n >= (int)size) 94 return size - 1; 95 return n; 96 } 97 98 static int64_t cmp_null(const void *l, const void *r) 99 { 100 if (!l && !r) 101 return 0; 102 else if (!l) 103 return -1; 104 else 105 return 1; 106 } 107 108 /* --sort pid */ 109 110 static int64_t 111 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 112 { 113 return thread__tid(right->thread) - thread__tid(left->thread); 114 } 115 116 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 117 size_t size, unsigned int width) 118 { 119 const char *comm = thread__comm_str(he->thread); 120 121 width = max(7U, width) - 8; 122 return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread), 123 width, width, comm ?: ""); 124 } 125 126 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 127 { 128 const struct thread *th = arg; 129 130 if (type != HIST_FILTER__THREAD) 131 return -1; 132 133 return th && !RC_CHK_EQUAL(he->thread, th); 134 } 135 136 struct sort_entry sort_thread = { 137 .se_header = " Pid:Command", 138 .se_cmp = sort__thread_cmp, 139 .se_snprintf = hist_entry__thread_snprintf, 
140 .se_filter = hist_entry__thread_filter, 141 .se_width_idx = HISTC_THREAD, 142 }; 143 144 /* --sort simd */ 145 146 static int64_t 147 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right) 148 { 149 if (left->simd_flags.arch != right->simd_flags.arch) 150 return (int64_t) left->simd_flags.arch - right->simd_flags.arch; 151 152 return (int64_t) left->simd_flags.pred - right->simd_flags.pred; 153 } 154 155 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags) 156 { 157 u64 arch = simd_flags->arch; 158 159 if (arch & SIMD_OP_FLAGS_ARCH_SVE) 160 return "SVE"; 161 else 162 return "n/a"; 163 } 164 165 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf, 166 size_t size, unsigned int width __maybe_unused) 167 { 168 const char *name; 169 170 if (!he->simd_flags.arch) 171 return repsep_snprintf(bf, size, ""); 172 173 name = hist_entry__get_simd_name(&he->simd_flags); 174 175 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY) 176 return repsep_snprintf(bf, size, "[e] %s", name); 177 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL) 178 return repsep_snprintf(bf, size, "[p] %s", name); 179 180 return repsep_snprintf(bf, size, "[.] %s", name); 181 } 182 183 struct sort_entry sort_simd = { 184 .se_header = "Simd ", 185 .se_cmp = sort__simd_cmp, 186 .se_snprintf = hist_entry__simd_snprintf, 187 .se_width_idx = HISTC_SIMD, 188 }; 189 190 /* --sort comm */ 191 192 /* 193 * We can't use pointer comparison in functions below, 194 * because it gives different results based on pointer 195 * values, which could break some sorting assumptions. 
196 */ 197 static int64_t 198 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 199 { 200 return strcmp(comm__str(right->comm), comm__str(left->comm)); 201 } 202 203 static int64_t 204 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 205 { 206 return strcmp(comm__str(right->comm), comm__str(left->comm)); 207 } 208 209 static int64_t 210 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 211 { 212 return strcmp(comm__str(right->comm), comm__str(left->comm)); 213 } 214 215 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 216 size_t size, unsigned int width) 217 { 218 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 219 } 220 221 struct sort_entry sort_comm = { 222 .se_header = "Command", 223 .se_cmp = sort__comm_cmp, 224 .se_collapse = sort__comm_collapse, 225 .se_sort = sort__comm_sort, 226 .se_snprintf = hist_entry__comm_snprintf, 227 .se_filter = hist_entry__thread_filter, 228 .se_width_idx = HISTC_COMM, 229 }; 230 231 /* --sort dso */ 232 233 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 234 { 235 struct dso *dso_l = map_l ? map__dso(map_l) : NULL; 236 struct dso *dso_r = map_r ? map__dso(map_r) : NULL; 237 const char *dso_name_l, *dso_name_r; 238 239 if (!dso_l || !dso_r) 240 return cmp_null(dso_r, dso_l); 241 242 if (verbose > 0) { 243 dso_name_l = dso__long_name(dso_l); 244 dso_name_r = dso__long_name(dso_r); 245 } else { 246 dso_name_l = dso__short_name(dso_l); 247 dso_name_r = dso__short_name(dso_r); 248 } 249 250 return strcmp(dso_name_l, dso_name_r); 251 } 252 253 static int64_t 254 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 255 { 256 return _sort__dso_cmp(right->ms.map, left->ms.map); 257 } 258 259 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 260 size_t size, unsigned int width) 261 { 262 const struct dso *dso = map ? 
map__dso(map) : NULL; 263 const char *dso_name = "[unknown]"; 264 265 if (dso) 266 dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso); 267 268 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 269 } 270 271 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 272 size_t size, unsigned int width) 273 { 274 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 275 } 276 277 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 278 { 279 const struct dso *dso = arg; 280 281 if (type != HIST_FILTER__DSO) 282 return -1; 283 284 return dso && (!he->ms.map || map__dso(he->ms.map) != dso); 285 } 286 287 struct sort_entry sort_dso = { 288 .se_header = "Shared Object", 289 .se_cmp = sort__dso_cmp, 290 .se_snprintf = hist_entry__dso_snprintf, 291 .se_filter = hist_entry__dso_filter, 292 .se_width_idx = HISTC_DSO, 293 }; 294 295 /* --sort symbol */ 296 297 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 298 { 299 return (int64_t)(right_ip - left_ip); 300 } 301 302 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 303 { 304 if (!sym_l || !sym_r) 305 return cmp_null(sym_l, sym_r); 306 307 if (sym_l == sym_r) 308 return 0; 309 310 if (sym_l->inlined || sym_r->inlined) { 311 int ret = strcmp(sym_l->name, sym_r->name); 312 313 if (ret) 314 return ret; 315 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) 316 return 0; 317 } 318 319 if (sym_l->start != sym_r->start) 320 return (int64_t)(sym_r->start - sym_l->start); 321 322 return (int64_t)(sym_r->end - sym_l->end); 323 } 324 325 static int64_t 326 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 327 { 328 int64_t ret; 329 330 if (!left->ms.sym && !right->ms.sym) 331 return _sort__addr_cmp(left->ip, right->ip); 332 333 /* 334 * comparing symbol address alone is not enough since it's a 335 * relative address within a dso. 
336 */ 337 if (!hists__has(left->hists, dso)) { 338 ret = sort__dso_cmp(left, right); 339 if (ret != 0) 340 return ret; 341 } 342 343 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 344 } 345 346 static int64_t 347 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 348 { 349 if (!left->ms.sym || !right->ms.sym) 350 return cmp_null(left->ms.sym, right->ms.sym); 351 352 return strcmp(right->ms.sym->name, left->ms.sym->name); 353 } 354 355 static int _hist_entry__sym_snprintf(struct map_symbol *ms, 356 u64 ip, char level, char *bf, size_t size, 357 unsigned int width) 358 { 359 struct symbol *sym = ms->sym; 360 struct map *map = ms->map; 361 size_t ret = 0; 362 363 if (verbose > 0) { 364 struct dso *dso = map ? map__dso(map) : NULL; 365 char o = dso ? dso__symtab_origin(dso) : '!'; 366 u64 rip = ip; 367 368 if (dso && dso__kernel(dso) && dso__adjust_symbols(dso)) 369 rip = map__unmap_ip(map, ip); 370 371 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 372 BITS_PER_LONG / 4 + 2, rip, o); 373 } 374 375 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 376 if (sym && map) { 377 if (sym->type == STT_OBJECT) { 378 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 379 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 380 ip - map__unmap_ip(map, sym->start)); 381 } else { 382 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 383 width - ret, 384 sym->name); 385 if (sym->inlined) 386 ret += repsep_snprintf(bf + ret, size - ret, 387 " (inlined)"); 388 } 389 } else { 390 size_t len = BITS_PER_LONG / 4; 391 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 392 len, ip); 393 } 394 395 return ret; 396 } 397 398 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) 399 { 400 return _hist_entry__sym_snprintf(&he->ms, he->ip, 401 he->level, bf, size, width); 402 } 403 404 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 405 { 406 const char *sym = 
arg; 407 408 if (type != HIST_FILTER__SYMBOL) 409 return -1; 410 411 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 412 } 413 414 struct sort_entry sort_sym = { 415 .se_header = "Symbol", 416 .se_cmp = sort__sym_cmp, 417 .se_sort = sort__sym_sort, 418 .se_snprintf = hist_entry__sym_snprintf, 419 .se_filter = hist_entry__sym_filter, 420 .se_width_idx = HISTC_SYMBOL, 421 }; 422 423 /* --sort symoff */ 424 425 static int64_t 426 sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right) 427 { 428 int64_t ret; 429 430 ret = sort__sym_cmp(left, right); 431 if (ret) 432 return ret; 433 434 return left->ip - right->ip; 435 } 436 437 static int64_t 438 sort__symoff_sort(struct hist_entry *left, struct hist_entry *right) 439 { 440 int64_t ret; 441 442 ret = sort__sym_sort(left, right); 443 if (ret) 444 return ret; 445 446 return left->ip - right->ip; 447 } 448 449 static int 450 hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) 451 { 452 struct symbol *sym = he->ms.sym; 453 454 if (sym == NULL) 455 return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip); 456 457 return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start); 458 } 459 460 struct sort_entry sort_sym_offset = { 461 .se_header = "Symbol Offset", 462 .se_cmp = sort__symoff_cmp, 463 .se_sort = sort__symoff_sort, 464 .se_snprintf = hist_entry__symoff_snprintf, 465 .se_filter = hist_entry__sym_filter, 466 .se_width_idx = HISTC_SYMBOL_OFFSET, 467 }; 468 469 /* --sort srcline */ 470 471 char *hist_entry__srcline(struct hist_entry *he) 472 { 473 return map__srcline(he->ms.map, he->ip, he->ms.sym); 474 } 475 476 static int64_t 477 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 478 { 479 int64_t ret; 480 481 ret = _sort__addr_cmp(left->ip, right->ip); 482 if (ret) 483 return ret; 484 485 return sort__dso_cmp(left, right); 486 } 487 488 static int64_t 489 
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right) 490 { 491 if (!left->srcline) 492 left->srcline = hist_entry__srcline(left); 493 if (!right->srcline) 494 right->srcline = hist_entry__srcline(right); 495 496 return strcmp(right->srcline, left->srcline); 497 } 498 499 static int64_t 500 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right) 501 { 502 return sort__srcline_collapse(left, right); 503 } 504 505 static void 506 sort__srcline_init(struct hist_entry *he) 507 { 508 if (!he->srcline) 509 he->srcline = hist_entry__srcline(he); 510 } 511 512 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 513 size_t size, unsigned int width) 514 { 515 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 516 } 517 518 struct sort_entry sort_srcline = { 519 .se_header = "Source:Line", 520 .se_cmp = sort__srcline_cmp, 521 .se_collapse = sort__srcline_collapse, 522 .se_sort = sort__srcline_sort, 523 .se_init = sort__srcline_init, 524 .se_snprintf = hist_entry__srcline_snprintf, 525 .se_width_idx = HISTC_SRCLINE, 526 }; 527 528 /* --sort srcline_from */ 529 530 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 531 { 532 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym); 533 } 534 535 static int64_t 536 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 537 { 538 return left->branch_info->from.addr - right->branch_info->from.addr; 539 } 540 541 static int64_t 542 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right) 543 { 544 if (!left->branch_info->srcline_from) 545 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 546 547 if (!right->branch_info->srcline_from) 548 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 549 550 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 551 } 552 553 static int64_t 554 
sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right) 555 { 556 return sort__srcline_from_collapse(left, right); 557 } 558 559 static void sort__srcline_from_init(struct hist_entry *he) 560 { 561 if (!he->branch_info->srcline_from) 562 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from); 563 } 564 565 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 566 size_t size, unsigned int width) 567 { 568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 569 } 570 571 struct sort_entry sort_srcline_from = { 572 .se_header = "From Source:Line", 573 .se_cmp = sort__srcline_from_cmp, 574 .se_collapse = sort__srcline_from_collapse, 575 .se_sort = sort__srcline_from_sort, 576 .se_init = sort__srcline_from_init, 577 .se_snprintf = hist_entry__srcline_from_snprintf, 578 .se_width_idx = HISTC_SRCLINE_FROM, 579 }; 580 581 /* --sort srcline_to */ 582 583 static int64_t 584 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 585 { 586 return left->branch_info->to.addr - right->branch_info->to.addr; 587 } 588 589 static int64_t 590 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right) 591 { 592 if (!left->branch_info->srcline_to) 593 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 594 595 if (!right->branch_info->srcline_to) 596 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 597 598 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 599 } 600 601 static int64_t 602 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right) 603 { 604 return sort__srcline_to_collapse(left, right); 605 } 606 607 static void sort__srcline_to_init(struct hist_entry *he) 608 { 609 if (!he->branch_info->srcline_to) 610 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to); 611 } 612 613 static int 
hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 614 size_t size, unsigned int width) 615 { 616 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 617 } 618 619 struct sort_entry sort_srcline_to = { 620 .se_header = "To Source:Line", 621 .se_cmp = sort__srcline_to_cmp, 622 .se_collapse = sort__srcline_to_collapse, 623 .se_sort = sort__srcline_to_sort, 624 .se_init = sort__srcline_to_init, 625 .se_snprintf = hist_entry__srcline_to_snprintf, 626 .se_width_idx = HISTC_SRCLINE_TO, 627 }; 628 629 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 630 size_t size, unsigned int width) 631 { 632 633 struct symbol *sym = he->ms.sym; 634 struct annotated_branch *branch; 635 double ipc = 0.0, coverage = 0.0; 636 char tmp[64]; 637 638 if (!sym) 639 return repsep_snprintf(bf, size, "%-*s", width, "-"); 640 641 branch = symbol__annotation(sym)->branch; 642 643 if (branch && branch->hit_cycles) 644 ipc = branch->hit_insn / ((double)branch->hit_cycles); 645 646 if (branch && branch->total_insn) { 647 coverage = branch->cover_insn * 100.0 / 648 ((double)branch->total_insn); 649 } 650 651 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 652 return repsep_snprintf(bf, size, "%-*s", width, tmp); 653 } 654 655 struct sort_entry sort_sym_ipc = { 656 .se_header = "IPC [IPC Coverage]", 657 .se_cmp = sort__sym_cmp, 658 .se_snprintf = hist_entry__sym_ipc_snprintf, 659 .se_width_idx = HISTC_SYMBOL_IPC, 660 }; 661 662 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 663 __maybe_unused, 664 char *bf, size_t size, 665 unsigned int width) 666 { 667 char tmp[64]; 668 669 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 670 return repsep_snprintf(bf, size, "%-*s", width, tmp); 671 } 672 673 struct sort_entry sort_sym_ipc_null = { 674 .se_header = "IPC [IPC Coverage]", 675 .se_cmp = sort__sym_cmp, 676 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 677 .se_width_idx = HISTC_SYMBOL_IPC, 678 
}; 679 680 /* --sort callchain_branch_predicted */ 681 682 static int64_t 683 sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused, 684 struct hist_entry *right __maybe_unused) 685 { 686 return 0; 687 } 688 689 static int hist_entry__callchain_branch_predicted_snprintf( 690 struct hist_entry *he, char *bf, size_t size, unsigned int width) 691 { 692 u64 branch_count, predicted_count; 693 double percent = 0.0; 694 char str[32]; 695 696 callchain_branch_counts(he->callchain, &branch_count, 697 &predicted_count, NULL, NULL); 698 699 if (branch_count) 700 percent = predicted_count * 100.0 / branch_count; 701 702 snprintf(str, sizeof(str), "%.1f%%", percent); 703 return repsep_snprintf(bf, size, "%-*.*s", width, width, str); 704 } 705 706 struct sort_entry sort_callchain_branch_predicted = { 707 .se_header = "Predicted", 708 .se_cmp = sort__callchain_branch_predicted_cmp, 709 .se_snprintf = hist_entry__callchain_branch_predicted_snprintf, 710 .se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED, 711 }; 712 713 /* --sort callchain_branch_abort */ 714 715 static int64_t 716 sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused, 717 struct hist_entry *right __maybe_unused) 718 { 719 return 0; 720 } 721 722 static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he, 723 char *bf, size_t size, 724 unsigned int width) 725 { 726 u64 branch_count, abort_count; 727 char str[32]; 728 729 callchain_branch_counts(he->callchain, &branch_count, 730 NULL, &abort_count, NULL); 731 732 snprintf(str, sizeof(str), "%" PRId64, abort_count); 733 return repsep_snprintf(bf, size, "%-*.*s", width, width, str); 734 } 735 736 struct sort_entry sort_callchain_branch_abort = { 737 .se_header = "Abort", 738 .se_cmp = sort__callchain_branch_abort_cmp, 739 .se_snprintf = hist_entry__callchain_branch_abort_snprintf, 740 .se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT, 741 }; 742 743 /* --sort callchain_branch_cycles */ 744 745 static int64_t 746 
sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused, 747 struct hist_entry *right __maybe_unused) 748 { 749 return 0; 750 } 751 752 static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he, 753 char *bf, size_t size, 754 unsigned int width) 755 { 756 u64 branch_count, cycles_count, cycles = 0; 757 char str[32]; 758 759 callchain_branch_counts(he->callchain, &branch_count, 760 NULL, NULL, &cycles_count); 761 762 if (branch_count) 763 cycles = cycles_count / branch_count; 764 765 snprintf(str, sizeof(str), "%" PRId64 "", cycles); 766 return repsep_snprintf(bf, size, "%-*.*s", width, width, str); 767 } 768 769 struct sort_entry sort_callchain_branch_cycles = { 770 .se_header = "Cycles", 771 .se_cmp = sort__callchain_branch_cycles_cmp, 772 .se_snprintf = hist_entry__callchain_branch_cycles_snprintf, 773 .se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES, 774 }; 775 776 /* --sort srcfile */ 777 778 static char no_srcfile[1]; 779 780 static char *hist_entry__get_srcfile(struct hist_entry *e) 781 { 782 char *sf, *p; 783 struct map *map = e->ms.map; 784 785 if (!map) 786 return no_srcfile; 787 788 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip), 789 e->ms.sym, false, true, true, e->ip); 790 if (sf == SRCLINE_UNKNOWN) 791 return no_srcfile; 792 p = strchr(sf, ':'); 793 if (p && *sf) { 794 *p = 0; 795 return sf; 796 } 797 free(sf); 798 return no_srcfile; 799 } 800 801 static int64_t 802 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 803 { 804 return sort__srcline_cmp(left, right); 805 } 806 807 static int64_t 808 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right) 809 { 810 if (!left->srcfile) 811 left->srcfile = hist_entry__get_srcfile(left); 812 if (!right->srcfile) 813 right->srcfile = hist_entry__get_srcfile(right); 814 815 return strcmp(right->srcfile, left->srcfile); 816 } 817 818 static int64_t 819 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right) 820 { 
821 return sort__srcfile_collapse(left, right); 822 } 823 824 static void sort__srcfile_init(struct hist_entry *he) 825 { 826 if (!he->srcfile) 827 he->srcfile = hist_entry__get_srcfile(he); 828 } 829 830 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 831 size_t size, unsigned int width) 832 { 833 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 834 } 835 836 struct sort_entry sort_srcfile = { 837 .se_header = "Source File", 838 .se_cmp = sort__srcfile_cmp, 839 .se_collapse = sort__srcfile_collapse, 840 .se_sort = sort__srcfile_sort, 841 .se_init = sort__srcfile_init, 842 .se_snprintf = hist_entry__srcfile_snprintf, 843 .se_width_idx = HISTC_SRCFILE, 844 }; 845 846 /* --sort parent */ 847 848 static int64_t 849 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 850 { 851 struct symbol *sym_l = left->parent; 852 struct symbol *sym_r = right->parent; 853 854 if (!sym_l || !sym_r) 855 return cmp_null(sym_l, sym_r); 856 857 return strcmp(sym_r->name, sym_l->name); 858 } 859 860 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 861 size_t size, unsigned int width) 862 { 863 return repsep_snprintf(bf, size, "%-*.*s", width, width, 864 he->parent ? 
he->parent->name : "[other]"); 865 } 866 867 struct sort_entry sort_parent = { 868 .se_header = "Parent symbol", 869 .se_cmp = sort__parent_cmp, 870 .se_snprintf = hist_entry__parent_snprintf, 871 .se_width_idx = HISTC_PARENT, 872 }; 873 874 /* --sort cpu */ 875 876 static int64_t 877 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 878 { 879 return right->cpu - left->cpu; 880 } 881 882 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 883 size_t size, unsigned int width) 884 { 885 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 886 } 887 888 struct sort_entry sort_cpu = { 889 .se_header = "CPU", 890 .se_cmp = sort__cpu_cmp, 891 .se_snprintf = hist_entry__cpu_snprintf, 892 .se_width_idx = HISTC_CPU, 893 }; 894 895 /* --sort parallelism */ 896 897 static int64_t 898 sort__parallelism_cmp(struct hist_entry *left, struct hist_entry *right) 899 { 900 return right->parallelism - left->parallelism; 901 } 902 903 static int hist_entry__parallelism_filter(struct hist_entry *he, int type, const void *arg) 904 { 905 const unsigned long *parallelism_filter = arg; 906 907 if (type != HIST_FILTER__PARALLELISM) 908 return -1; 909 910 return test_bit(he->parallelism, parallelism_filter); 911 } 912 913 static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf, 914 size_t size, unsigned int width) 915 { 916 return repsep_snprintf(bf, size, "%*d", width, he->parallelism); 917 } 918 919 struct sort_entry sort_parallelism = { 920 .se_header = "Parallelism", 921 .se_cmp = sort__parallelism_cmp, 922 .se_filter = hist_entry__parallelism_filter, 923 .se_snprintf = hist_entry__parallelism_snprintf, 924 .se_width_idx = HISTC_PARALLELISM, 925 }; 926 927 /* --sort cgroup_id */ 928 929 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 930 { 931 return (int64_t)(right_dev - left_dev); 932 } 933 934 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 935 { 936 return (int64_t)(right_ino - 
left_ino); 937 } 938 939 static int64_t 940 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 941 { 942 int64_t ret; 943 944 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 945 if (ret != 0) 946 return ret; 947 948 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 949 left->cgroup_id.ino); 950 } 951 952 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 953 char *bf, size_t size, 954 unsigned int width __maybe_unused) 955 { 956 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 957 he->cgroup_id.ino); 958 } 959 960 struct sort_entry sort_cgroup_id = { 961 .se_header = "cgroup id (dev/inode)", 962 .se_cmp = sort__cgroup_id_cmp, 963 .se_snprintf = hist_entry__cgroup_id_snprintf, 964 .se_width_idx = HISTC_CGROUP_ID, 965 }; 966 967 /* --sort cgroup */ 968 969 static int64_t 970 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right) 971 { 972 return right->cgroup - left->cgroup; 973 } 974 975 static int hist_entry__cgroup_snprintf(struct hist_entry *he, 976 char *bf, size_t size, 977 unsigned int width __maybe_unused) 978 { 979 const char *cgrp_name = "N/A"; 980 981 if (he->cgroup) { 982 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env, 983 he->cgroup); 984 if (cgrp != NULL) 985 cgrp_name = cgrp->name; 986 else 987 cgrp_name = "unknown"; 988 } 989 990 return repsep_snprintf(bf, size, "%s", cgrp_name); 991 } 992 993 struct sort_entry sort_cgroup = { 994 .se_header = "Cgroup", 995 .se_cmp = sort__cgroup_cmp, 996 .se_snprintf = hist_entry__cgroup_snprintf, 997 .se_width_idx = HISTC_CGROUP, 998 }; 999 1000 /* --sort socket */ 1001 1002 static int64_t 1003 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 1004 { 1005 return right->socket - left->socket; 1006 } 1007 1008 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 1009 size_t size, unsigned int width) 1010 { 1011 return repsep_snprintf(bf, size, "%*.*d", width, width-3, 
he->socket); 1012 } 1013 1014 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 1015 { 1016 int sk = *(const int *)arg; 1017 1018 if (type != HIST_FILTER__SOCKET) 1019 return -1; 1020 1021 return sk >= 0 && he->socket != sk; 1022 } 1023 1024 struct sort_entry sort_socket = { 1025 .se_header = "Socket", 1026 .se_cmp = sort__socket_cmp, 1027 .se_snprintf = hist_entry__socket_snprintf, 1028 .se_filter = hist_entry__socket_filter, 1029 .se_width_idx = HISTC_SOCKET, 1030 }; 1031 1032 /* --sort time */ 1033 1034 static int64_t 1035 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 1036 { 1037 return right->time - left->time; 1038 } 1039 1040 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf, 1041 size_t size, unsigned int width) 1042 { 1043 char he_time[32]; 1044 1045 if (symbol_conf.nanosecs) 1046 timestamp__scnprintf_nsec(he->time, he_time, 1047 sizeof(he_time)); 1048 else 1049 timestamp__scnprintf_usec(he->time, he_time, 1050 sizeof(he_time)); 1051 1052 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 1053 } 1054 1055 struct sort_entry sort_time = { 1056 .se_header = "Time", 1057 .se_cmp = sort__time_cmp, 1058 .se_snprintf = hist_entry__time_snprintf, 1059 .se_width_idx = HISTC_TIME, 1060 }; 1061 1062 /* --sort trace */ 1063 1064 #ifdef HAVE_LIBTRACEEVENT 1065 static char *get_trace_output(struct hist_entry *he) 1066 { 1067 struct trace_seq seq; 1068 struct evsel *evsel; 1069 struct tep_record rec = { 1070 .data = he->raw_data, 1071 .size = he->raw_size, 1072 }; 1073 struct tep_event *tp_format; 1074 1075 evsel = hists_to_evsel(he->hists); 1076 1077 trace_seq_init(&seq); 1078 tp_format = evsel__tp_format(evsel); 1079 if (tp_format) { 1080 if (symbol_conf.raw_trace) 1081 tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format); 1082 else 1083 tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO); 1084 } 1085 1086 /* 1087 * Trim the buffer, it starts at 4KB and we're not 
going to 1088 * add anything more to this buffer. 1089 */ 1090 return realloc(seq.buffer, seq.len + 1); 1091 } 1092 1093 static int64_t 1094 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 1095 { 1096 struct evsel *evsel; 1097 1098 evsel = hists_to_evsel(left->hists); 1099 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1100 return 0; 1101 1102 if (left->trace_output == NULL) 1103 left->trace_output = get_trace_output(left); 1104 if (right->trace_output == NULL) 1105 right->trace_output = get_trace_output(right); 1106 1107 return strcmp(right->trace_output, left->trace_output); 1108 } 1109 1110 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 1111 size_t size, unsigned int width) 1112 { 1113 struct evsel *evsel; 1114 1115 evsel = hists_to_evsel(he->hists); 1116 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1117 return scnprintf(bf, size, "%-.*s", width, "N/A"); 1118 1119 if (he->trace_output == NULL) 1120 he->trace_output = get_trace_output(he); 1121 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 1122 } 1123 1124 struct sort_entry sort_trace = { 1125 .se_header = "Trace output", 1126 .se_cmp = sort__trace_cmp, 1127 .se_snprintf = hist_entry__trace_snprintf, 1128 .se_width_idx = HISTC_TRACE, 1129 }; 1130 #endif /* HAVE_LIBTRACEEVENT */ 1131 1132 /* sort keys for branch stacks */ 1133 1134 static int64_t 1135 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 1136 { 1137 if (!left->branch_info || !right->branch_info) 1138 return cmp_null(left->branch_info, right->branch_info); 1139 1140 return _sort__dso_cmp(left->branch_info->from.ms.map, 1141 right->branch_info->from.ms.map); 1142 } 1143 1144 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 1145 size_t size, unsigned int width) 1146 { 1147 if (he->branch_info) 1148 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map, 1149 bf, size, width); 1150 else 1151 return repsep_snprintf(bf, size, 
"%-*.*s", width, width, "N/A"); 1152 } 1153 1154 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 1155 const void *arg) 1156 { 1157 const struct dso *dso = arg; 1158 1159 if (type != HIST_FILTER__DSO) 1160 return -1; 1161 1162 return dso && (!he->branch_info || !he->branch_info->from.ms.map || 1163 map__dso(he->branch_info->from.ms.map) != dso); 1164 } 1165 1166 static int64_t 1167 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 1168 { 1169 if (!left->branch_info || !right->branch_info) 1170 return cmp_null(left->branch_info, right->branch_info); 1171 1172 return _sort__dso_cmp(left->branch_info->to.ms.map, 1173 right->branch_info->to.ms.map); 1174 } 1175 1176 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 1177 size_t size, unsigned int width) 1178 { 1179 if (he->branch_info) 1180 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map, 1181 bf, size, width); 1182 else 1183 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1184 } 1185 1186 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 1187 const void *arg) 1188 { 1189 const struct dso *dso = arg; 1190 1191 if (type != HIST_FILTER__DSO) 1192 return -1; 1193 1194 return dso && (!he->branch_info || !he->branch_info->to.ms.map || 1195 map__dso(he->branch_info->to.ms.map) != dso); 1196 } 1197 1198 static int64_t 1199 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 1200 { 1201 struct addr_map_symbol *from_l, *from_r; 1202 1203 if (!left->branch_info || !right->branch_info) 1204 return cmp_null(left->branch_info, right->branch_info); 1205 1206 from_l = &left->branch_info->from; 1207 from_r = &right->branch_info->from; 1208 1209 if (!from_l->ms.sym && !from_r->ms.sym) 1210 return _sort__addr_cmp(from_l->addr, from_r->addr); 1211 1212 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym); 1213 } 1214 1215 static int64_t 1216 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 
1217 { 1218 struct addr_map_symbol *to_l, *to_r; 1219 1220 if (!left->branch_info || !right->branch_info) 1221 return cmp_null(left->branch_info, right->branch_info); 1222 1223 to_l = &left->branch_info->to; 1224 to_r = &right->branch_info->to; 1225 1226 if (!to_l->ms.sym && !to_r->ms.sym) 1227 return _sort__addr_cmp(to_l->addr, to_r->addr); 1228 1229 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym); 1230 } 1231 1232 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 1233 size_t size, unsigned int width) 1234 { 1235 if (he->branch_info) { 1236 struct addr_map_symbol *from = &he->branch_info->from; 1237 1238 return _hist_entry__sym_snprintf(&from->ms, from->al_addr, 1239 from->al_level, bf, size, width); 1240 } 1241 1242 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1243 } 1244 1245 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 1246 size_t size, unsigned int width) 1247 { 1248 if (he->branch_info) { 1249 struct addr_map_symbol *to = &he->branch_info->to; 1250 1251 return _hist_entry__sym_snprintf(&to->ms, to->al_addr, 1252 to->al_level, bf, size, width); 1253 } 1254 1255 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1256 } 1257 1258 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 1259 const void *arg) 1260 { 1261 const char *sym = arg; 1262 1263 if (type != HIST_FILTER__SYMBOL) 1264 return -1; 1265 1266 return sym && !(he->branch_info && he->branch_info->from.ms.sym && 1267 strstr(he->branch_info->from.ms.sym->name, sym)); 1268 } 1269 1270 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 1271 const void *arg) 1272 { 1273 const char *sym = arg; 1274 1275 if (type != HIST_FILTER__SYMBOL) 1276 return -1; 1277 1278 return sym && !(he->branch_info && he->branch_info->to.ms.sym && 1279 strstr(he->branch_info->to.ms.sym->name, sym)); 1280 } 1281 1282 struct sort_entry sort_dso_from = { 1283 .se_header = "Source Shared Object", 1284 .se_cmp = 
sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

/*
 * Print "[level] symbol+0xoff", or the raw address when no symbol/map
 * was resolved.  STT_OBJECT symbols always get an offset suffix; other
 * symbols only when the offset is non-zero.
 */
static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
				      unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

/*
 * Print the branch source address.  Note this passes he->level, unlike
 * hist_entry__sym_from_snprintf() which uses from->al_level.
 */
static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* Print the branch target address, or "N/A" without branch info. */
static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

/* Order by branch source DSO first, then by raw address within it. */
static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

/* Order by branch target DSO first, then by raw address within it. */
static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
1411 */ 1412 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map); 1413 if (ret != 0) 1414 return ret; 1415 1416 return _sort__addr_cmp(to_l->addr, to_r->addr); 1417 } 1418 1419 struct sort_entry sort_addr_from = { 1420 .se_header = "Source Address", 1421 .se_cmp = sort__addr_from_cmp, 1422 .se_snprintf = hist_entry__addr_from_snprintf, 1423 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */ 1424 .se_width_idx = HISTC_ADDR_FROM, 1425 }; 1426 1427 struct sort_entry sort_addr_to = { 1428 .se_header = "Target Address", 1429 .se_cmp = sort__addr_to_cmp, 1430 .se_snprintf = hist_entry__addr_to_snprintf, 1431 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */ 1432 .se_width_idx = HISTC_ADDR_TO, 1433 }; 1434 1435 1436 static int64_t 1437 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 1438 { 1439 unsigned char mp, p; 1440 1441 if (!left->branch_info || !right->branch_info) 1442 return cmp_null(left->branch_info, right->branch_info); 1443 1444 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 1445 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 1446 return mp || p; 1447 } 1448 1449 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 1450 size_t size, unsigned int width){ 1451 static const char *out = "N/A"; 1452 1453 if (he->branch_info) { 1454 if (he->branch_info->flags.predicted) 1455 out = "N"; 1456 else if (he->branch_info->flags.mispred) 1457 out = "Y"; 1458 } 1459 1460 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 1461 } 1462 1463 static int64_t 1464 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 1465 { 1466 if (!left->branch_info || !right->branch_info) 1467 return cmp_null(left->branch_info, right->branch_info); 1468 1469 return left->branch_info->flags.cycles - 1470 right->branch_info->flags.cycles; 1471 } 1472 1473 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 1474 
size_t size, unsigned int width) 1475 { 1476 if (!he->branch_info) 1477 return scnprintf(bf, size, "%-.*s", width, "N/A"); 1478 if (he->branch_info->flags.cycles == 0) 1479 return repsep_snprintf(bf, size, "%-*s", width, "-"); 1480 return repsep_snprintf(bf, size, "%-*hd", width, 1481 he->branch_info->flags.cycles); 1482 } 1483 1484 struct sort_entry sort_cycles = { 1485 .se_header = "Basic Block Cycles", 1486 .se_cmp = sort__cycles_cmp, 1487 .se_snprintf = hist_entry__cycles_snprintf, 1488 .se_width_idx = HISTC_CYCLES, 1489 }; 1490 1491 /* --sort daddr_sym */ 1492 int64_t 1493 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1494 { 1495 uint64_t l = 0, r = 0; 1496 1497 if (left->mem_info) 1498 l = mem_info__daddr(left->mem_info)->addr; 1499 if (right->mem_info) 1500 r = mem_info__daddr(right->mem_info)->addr; 1501 1502 return (int64_t)(r - l); 1503 } 1504 1505 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1506 size_t size, unsigned int width) 1507 { 1508 uint64_t addr = 0; 1509 struct map_symbol *ms = NULL; 1510 1511 if (he->mem_info) { 1512 addr = mem_info__daddr(he->mem_info)->addr; 1513 ms = &mem_info__daddr(he->mem_info)->ms; 1514 } 1515 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1516 } 1517 1518 int64_t 1519 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1520 { 1521 uint64_t l = 0, r = 0; 1522 1523 if (left->mem_info) 1524 l = mem_info__iaddr(left->mem_info)->addr; 1525 if (right->mem_info) 1526 r = mem_info__iaddr(right->mem_info)->addr; 1527 1528 return (int64_t)(r - l); 1529 } 1530 1531 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1532 size_t size, unsigned int width) 1533 { 1534 uint64_t addr = 0; 1535 struct map_symbol *ms = NULL; 1536 1537 if (he->mem_info) { 1538 addr = mem_info__iaddr(he->mem_info)->addr; 1539 ms = &mem_info__iaddr(he->mem_info)->ms; 1540 } 1541 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 
}

/* Order by the DSO containing the data address. */
static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = mem_info__daddr(left->mem_info)->ms.map;
	if (right->mem_info)
		map_r = mem_info__daddr(right->mem_info)->ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

/* Print the DSO containing the data address. */
static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = mem_info__daddr(he->mem_info)->ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

/* Order by the mem_lock bits of the data source; PERF_MEM_LOCK_NA when absent. */
static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

/* Print the lock attribute of the access. */
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

/* Order by the TLB-access bits of the data source; PERF_MEM_TLB_NA when absent. */
static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

/* Print the TLB access description of the entry. */
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

/* Order by the memory-level bits of the data source; PERF_MEM_LVL_NA when absent. */
static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

/* Print the memory hierarchy level of the access. */
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

/* Order by the snoop bits of the data source; PERF_MEM_SNOOP_NA when absent. */
static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

/* Print the snoop result of the access. */
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
/*
 * Order entries by data cacheline.  Groups by cpumode, then DSO identity,
 * then (for anonymous userspace mappings) pid, and finally by cacheline
 * address.  Entries without mem_info sort first/last via the early returns.
 */
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	struct dso *l_dso, *r_dso;
	int rc;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = mem_info__daddr(left->mem_info)->ms.map;
	r_map = mem_info__daddr(right->mem_info)->ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	l_dso = map__dso(l_map);
	r_dso = map__dso(r_map);
	rc = dso__cmp_id(l_dso, r_dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
	    !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
		/* userspace anonymous */

		if (thread__pid(left->thread) > thread__pid(right->thread))
			return -1;
		if (thread__pid(left->thread) < thread__pid(right->thread))
			return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
	r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

/*
 * Print the data cacheline address.  The map level is tweaked to 's' for
 * shared, file-backed data mappings and to 'X' when no map was resolved.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = mem_info__daddr(he->mem_info)->ms.map;
		struct dso *dso = map ? map__dso(map) : NULL;

		addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
		ms = &mem_info__daddr(he->mem_info)->ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map__prot(map) & PROT_EXEC) &&
		    (map__flags(map) & MAP_SHARED) &&
		    (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
		     dso__id(dso)->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

/* Order by sample weight. */
static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight - right->weight;
}

/* Print the per-sample (local) weight. */
static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

/* Print the aggregated weight: per-sample weight times the event count. */
static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width,
			       he->weight * he->stat.nr_events);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

/* Order by instruction latency. */
static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->ins_lat - right->ins_lat;
1813 } 1814 1815 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, 1816 size_t size, unsigned int width) 1817 { 1818 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat); 1819 } 1820 1821 struct sort_entry sort_local_ins_lat = { 1822 .se_header = "Local INSTR Latency", 1823 .se_cmp = sort__ins_lat_cmp, 1824 .se_snprintf = hist_entry__local_ins_lat_snprintf, 1825 .se_width_idx = HISTC_LOCAL_INS_LAT, 1826 }; 1827 1828 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, 1829 size_t size, unsigned int width) 1830 { 1831 return repsep_snprintf(bf, size, "%-*u", width, 1832 he->ins_lat * he->stat.nr_events); 1833 } 1834 1835 struct sort_entry sort_global_ins_lat = { 1836 .se_header = "INSTR Latency", 1837 .se_cmp = sort__ins_lat_cmp, 1838 .se_snprintf = hist_entry__global_ins_lat_snprintf, 1839 .se_width_idx = HISTC_GLOBAL_INS_LAT, 1840 }; 1841 1842 static int64_t 1843 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) 1844 { 1845 return left->p_stage_cyc - right->p_stage_cyc; 1846 } 1847 1848 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1849 size_t size, unsigned int width) 1850 { 1851 return repsep_snprintf(bf, size, "%-*u", width, 1852 he->p_stage_cyc * he->stat.nr_events); 1853 } 1854 1855 1856 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1857 size_t size, unsigned int width) 1858 { 1859 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); 1860 } 1861 1862 struct sort_entry sort_local_p_stage_cyc = { 1863 .se_header = "Local Pipeline Stage Cycle", 1864 .se_cmp = sort__p_stage_cyc_cmp, 1865 .se_snprintf = hist_entry__p_stage_cyc_snprintf, 1866 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC, 1867 }; 1868 1869 struct sort_entry sort_global_p_stage_cyc = { 1870 .se_header = "Pipeline Stage Cycle", 1871 .se_cmp = sort__p_stage_cyc_cmp, 1872 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf, 1873 
.se_width_idx = HISTC_GLOBAL_P_STAGE_CYC, 1874 }; 1875 1876 struct sort_entry sort_mem_daddr_sym = { 1877 .se_header = "Data Symbol", 1878 .se_cmp = sort__daddr_cmp, 1879 .se_snprintf = hist_entry__daddr_snprintf, 1880 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1881 }; 1882 1883 struct sort_entry sort_mem_iaddr_sym = { 1884 .se_header = "Code Symbol", 1885 .se_cmp = sort__iaddr_cmp, 1886 .se_snprintf = hist_entry__iaddr_snprintf, 1887 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1888 }; 1889 1890 struct sort_entry sort_mem_daddr_dso = { 1891 .se_header = "Data Object", 1892 .se_cmp = sort__dso_daddr_cmp, 1893 .se_snprintf = hist_entry__dso_daddr_snprintf, 1894 .se_width_idx = HISTC_MEM_DADDR_DSO, 1895 }; 1896 1897 struct sort_entry sort_mem_locked = { 1898 .se_header = "Locked", 1899 .se_cmp = sort__locked_cmp, 1900 .se_snprintf = hist_entry__locked_snprintf, 1901 .se_width_idx = HISTC_MEM_LOCKED, 1902 }; 1903 1904 struct sort_entry sort_mem_tlb = { 1905 .se_header = "TLB access", 1906 .se_cmp = sort__tlb_cmp, 1907 .se_snprintf = hist_entry__tlb_snprintf, 1908 .se_width_idx = HISTC_MEM_TLB, 1909 }; 1910 1911 struct sort_entry sort_mem_lvl = { 1912 .se_header = "Memory access", 1913 .se_cmp = sort__lvl_cmp, 1914 .se_snprintf = hist_entry__lvl_snprintf, 1915 .se_width_idx = HISTC_MEM_LVL, 1916 }; 1917 1918 struct sort_entry sort_mem_snoop = { 1919 .se_header = "Snoop", 1920 .se_cmp = sort__snoop_cmp, 1921 .se_snprintf = hist_entry__snoop_snprintf, 1922 .se_width_idx = HISTC_MEM_SNOOP, 1923 }; 1924 1925 struct sort_entry sort_mem_dcacheline = { 1926 .se_header = "Data Cacheline", 1927 .se_cmp = sort__dcacheline_cmp, 1928 .se_snprintf = hist_entry__dcacheline_snprintf, 1929 .se_width_idx = HISTC_MEM_DCACHELINE, 1930 }; 1931 1932 static int64_t 1933 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) 1934 { 1935 union perf_mem_data_src data_src_l; 1936 union perf_mem_data_src data_src_r; 1937 1938 if (left->mem_info) 1939 data_src_l = 
*mem_info__data_src(left->mem_info); 1940 else 1941 data_src_l.mem_blk = PERF_MEM_BLK_NA; 1942 1943 if (right->mem_info) 1944 data_src_r = *mem_info__data_src(right->mem_info); 1945 else 1946 data_src_r.mem_blk = PERF_MEM_BLK_NA; 1947 1948 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk); 1949 } 1950 1951 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf, 1952 size_t size, unsigned int width) 1953 { 1954 char out[16]; 1955 1956 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info); 1957 return repsep_snprintf(bf, size, "%.*s", width, out); 1958 } 1959 1960 struct sort_entry sort_mem_blocked = { 1961 .se_header = "Blocked", 1962 .se_cmp = sort__blocked_cmp, 1963 .se_snprintf = hist_entry__blocked_snprintf, 1964 .se_width_idx = HISTC_MEM_BLOCKED, 1965 }; 1966 1967 static int64_t 1968 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1969 { 1970 uint64_t l = 0, r = 0; 1971 1972 if (left->mem_info) 1973 l = mem_info__daddr(left->mem_info)->phys_addr; 1974 if (right->mem_info) 1975 r = mem_info__daddr(right->mem_info)->phys_addr; 1976 1977 return (int64_t)(r - l); 1978 } 1979 1980 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1981 size_t size, unsigned int width) 1982 { 1983 uint64_t addr = 0; 1984 size_t ret = 0; 1985 size_t len = BITS_PER_LONG / 4; 1986 1987 addr = mem_info__daddr(he->mem_info)->phys_addr; 1988 1989 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1990 1991 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1992 1993 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1994 1995 if (ret > width) 1996 bf[width] = '\0'; 1997 1998 return width; 1999 } 2000 2001 struct sort_entry sort_mem_phys_daddr = { 2002 .se_header = "Data Physical Address", 2003 .se_cmp = sort__phys_daddr_cmp, 2004 .se_snprintf = hist_entry__phys_daddr_snprintf, 2005 .se_width_idx = HISTC_MEM_PHYS_DADDR, 2006 }; 2007 2008 static int64_t 2009 
sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 2010 { 2011 uint64_t l = 0, r = 0; 2012 2013 if (left->mem_info) 2014 l = mem_info__daddr(left->mem_info)->data_page_size; 2015 if (right->mem_info) 2016 r = mem_info__daddr(right->mem_info)->data_page_size; 2017 2018 return (int64_t)(r - l); 2019 } 2020 2021 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf, 2022 size_t size, unsigned int width) 2023 { 2024 char str[PAGE_SIZE_NAME_LEN]; 2025 2026 return repsep_snprintf(bf, size, "%-*s", width, 2027 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str)); 2028 } 2029 2030 struct sort_entry sort_mem_data_page_size = { 2031 .se_header = "Data Page Size", 2032 .se_cmp = sort__data_page_size_cmp, 2033 .se_snprintf = hist_entry__data_page_size_snprintf, 2034 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE, 2035 }; 2036 2037 static int64_t 2038 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 2039 { 2040 uint64_t l = left->code_page_size; 2041 uint64_t r = right->code_page_size; 2042 2043 return (int64_t)(r - l); 2044 } 2045 2046 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf, 2047 size_t size, unsigned int width) 2048 { 2049 char str[PAGE_SIZE_NAME_LEN]; 2050 2051 return repsep_snprintf(bf, size, "%-*s", width, 2052 get_page_size_name(he->code_page_size, str)); 2053 } 2054 2055 struct sort_entry sort_code_page_size = { 2056 .se_header = "Code Page Size", 2057 .se_cmp = sort__code_page_size_cmp, 2058 .se_snprintf = hist_entry__code_page_size_snprintf, 2059 .se_width_idx = HISTC_CODE_PAGE_SIZE, 2060 }; 2061 2062 static int64_t 2063 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 2064 { 2065 if (!left->branch_info || !right->branch_info) 2066 return cmp_null(left->branch_info, right->branch_info); 2067 2068 return left->branch_info->flags.abort != 2069 right->branch_info->flags.abort; 2070 } 2071 2072 static int 
hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 2073 size_t size, unsigned int width) 2074 { 2075 static const char *out = "N/A"; 2076 2077 if (he->branch_info) { 2078 if (he->branch_info->flags.abort) 2079 out = "A"; 2080 else 2081 out = "."; 2082 } 2083 2084 return repsep_snprintf(bf, size, "%-*s", width, out); 2085 } 2086 2087 struct sort_entry sort_abort = { 2088 .se_header = "Transaction abort", 2089 .se_cmp = sort__abort_cmp, 2090 .se_snprintf = hist_entry__abort_snprintf, 2091 .se_width_idx = HISTC_ABORT, 2092 }; 2093 2094 static int64_t 2095 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 2096 { 2097 if (!left->branch_info || !right->branch_info) 2098 return cmp_null(left->branch_info, right->branch_info); 2099 2100 return left->branch_info->flags.in_tx != 2101 right->branch_info->flags.in_tx; 2102 } 2103 2104 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 2105 size_t size, unsigned int width) 2106 { 2107 static const char *out = "N/A"; 2108 2109 if (he->branch_info) { 2110 if (he->branch_info->flags.in_tx) 2111 out = "T"; 2112 else 2113 out = "."; 2114 } 2115 2116 return repsep_snprintf(bf, size, "%-*s", width, out); 2117 } 2118 2119 struct sort_entry sort_in_tx = { 2120 .se_header = "Branch in transaction", 2121 .se_cmp = sort__in_tx_cmp, 2122 .se_snprintf = hist_entry__in_tx_snprintf, 2123 .se_width_idx = HISTC_IN_TX, 2124 }; 2125 2126 static int64_t 2127 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 2128 { 2129 return left->transaction - right->transaction; 2130 } 2131 2132 static inline char *add_str(char *p, const char *str) 2133 { 2134 strcpy(p, str); 2135 return p + strlen(str); 2136 } 2137 2138 static struct txbit { 2139 unsigned flag; 2140 const char *name; 2141 int skip_for_len; 2142 } txbits[] = { 2143 { PERF_TXN_ELISION, "EL ", 0 }, 2144 { PERF_TXN_TRANSACTION, "TX ", 1 }, 2145 { PERF_TXN_SYNC, "SYNC ", 1 }, 2146 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 2147 { 
	  PERF_TXN_RETRY,	"RETRY ",	0 },
	{ PERF_TXN_CONFLICT,	"CON ",		0 },
	{ PERF_TXN_CAPACITY_WRITE,	"CAP-WRITE ",	1 },
	{ PERF_TXN_CAPACITY_READ,	"CAP-READ ",	0 },
	{ 0, NULL, 0 }
};

/*
 * Column width for the transaction string: the sum of all flag-name
 * lengths not marked skip_for_len, plus room for the ":XX " abort-code
 * suffix appended by hist_entry__transaction_snprintf().
 */
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

/*
 * Render he->transaction as a space-separated list of flag names from
 * txbits[], followed by the hex abort code when PERF_TXN_ABORT_MASK
 * bits are set.  "NEITHER " is emitted for a non-zero value that has
 * neither the SYNC nor the ASYNC bit.
 */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

/* --sort symbol_size */

/* Compare two symbols by size; NULL counts as size 0. */
static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

/* Note the right/left swap: larger symbols sort first. */
static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};

/* --sort dso_size */

/* Compare two maps by mapped length; NULL counts as size 0. */
static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

/* Right/left swapped so that larger DSOs sort first. */
static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map__dso(map))
		return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};

/* --sort addr */

/*
 * Compare by unmapped (absolute) address: map-relative ips are
 * converted with map__unmap_ip() before comparing.
 */
static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 left_ip = left->ip;
	u64 right_ip = right->ip;
	struct map *left_map = left->ms.map;
	struct map *right_map = right->ms.map;

	if (left_map)
		left_ip = map__unmap_ip(left_map, left_ip);
	if (right_map)
		right_ip = map__unmap_ip(right_map, right_ip);

	return _sort__addr_cmp(left_ip, right_ip);
}

static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	u64 ip = he->ip;
	struct map *map = he->ms.map;

	if (map)
		ip = map__unmap_ip(map, ip);

	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}

struct sort_entry sort_addr = {
	.se_header	= "Address",
	.se_cmp		= sort__addr_cmp,
	.se_snprintf	= hist_entry__addr_snprintf,
	.se_width_idx	= HISTC_ADDR,
};

/* --sort type */

/* Fallback data type used when annotation cannot resolve one. */
struct annotated_data_type unknown_type = {
	.self = {
		.type_name = (char *)"(unknown)",
		.children = LIST_HEAD_INIT(unknown_type.self.children),
	},
};

/* Data-type sort keys group by address first; see se_collapse/se_sort. */
static int64_t
sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return sort__addr_cmp(left, right);
}

/*
 * Lazily resolve the data type for an entry; falls back to
 * &unknown_type (offset 0) when resolution fails, so mem_type is
 * always non-NULL afterwards.
 */
static void sort__type_init(struct hist_entry *he)
{
	if (he->mem_type)
		return;

	he->mem_type = hist_entry__get_data_type(he);
	if (he->mem_type == NULL) {
		he->mem_type = &unknown_type;
		he->mem_type_off = 0;
	}
}

/* Collapse entries with the same type name into one. */
static int64_t
sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct annotated_data_type *left_type = left->mem_type;
	struct annotated_data_type *right_type = right->mem_type;

	if (!left_type) {
		sort__type_init(left);
		left_type = left->mem_type;
	}

	if (!right_type) {
		sort__type_init(right);
		right_type = right->mem_type;
	}

	return strcmp(left_type->self.type_name, right_type->self.type_name);
}

static int64_t
sort__type_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__type_collapse(left, right);
}

static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
}

struct sort_entry sort_type = {
	.se_header	= "Data Type",
	.se_cmp		= sort__type_cmp,
	.se_collapse	= sort__type_collapse,
	.se_sort	= sort__type_sort,
	.se_init	= sort__type_init,
	.se_snprintf	= hist_entry__type_snprintf,
	.se_width_idx	= HISTC_TYPE,
};

/* --sort typeoff */

/* Order by type name first, then by field offset within the type. */
static int64_t
sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
{
	struct annotated_data_type *left_type = left->mem_type;
	struct annotated_data_type *right_type = right->mem_type;
	int64_t ret;

	if (!left_type) {
		sort__type_init(left);
		left_type = left->mem_type;
	}

	if (!right_type) {
		sort__type_init(right);
		right_type = right->mem_type;
	}

	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
	if (ret)
		return ret;
	return left->mem_type_off - right->mem_type_off;
}

/* Print "type +0xOFF (member)"; pseudo types print their name only. */
static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
{
	struct annotated_data_type *he_type = he->mem_type;
	char buf[4096];

	if (he_type == &unknown_type || he_type == &stackop_type ||
	    he_type == &canary_type)
		return repsep_snprintf(bf, size, "%s", he_type->self.type_name);

	if (!annotated_data_type__get_member_name(he_type, buf, sizeof(buf),
						  he->mem_type_off))
		scnprintf(buf, sizeof(buf), "no field");

	return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
			       he->mem_type_off, buf);
}

struct sort_entry sort_type_offset = {
	.se_header	= "Data Type Offset",
	.se_cmp		= sort__type_cmp,
	.se_collapse	= sort__typeoff_sort,
	.se_sort	= sort__typeoff_sort,
	.se_init	= sort__type_init,
	.se_snprintf	= hist_entry__typeoff_snprintf,
	.se_width_idx	= HISTC_TYPE_OFFSET,
};

/* --sort typecln */

/* TODO: use actual value in the system */
#define TYPE_CACHELINE_SIZE 64

/* Order by type name, then by which cacheline the offset falls into. */
static int64_t
sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
{
	struct annotated_data_type *left_type = left->mem_type;
	struct annotated_data_type *right_type = right->mem_type;
	int64_t left_cln, right_cln;
	int64_t ret;

	if (!left_type) {
		sort__type_init(left);
		left_type = left->mem_type;
	}

	if (!right_type) {
		sort__type_init(right);
		right_type = right->mem_type;
	}

	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
	if (ret)
		return ret;

	left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
	right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
	return left_cln - right_cln;
}

static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
{
	struct annotated_data_type *he_type = he->mem_type;

	return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
			       he->mem_type_off / TYPE_CACHELINE_SIZE);
}

struct sort_entry sort_type_cacheline = {
	.se_header	= "Data Type Cacheline",
	.se_cmp		= sort__type_cmp,
	.se_collapse	= sort__typecln_sort,
	.se_sort	= sort__typecln_sort,
	.se_init	= sort__type_init,
	.se_snprintf	= hist_entry__typecln_snprintf,
	.se_width_idx	= HISTC_TYPE_CACHELINE,
};


/* Maps a --sort token to its sort_entry; taken guards double insertion. */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

/* Weak default: no arch-specific sort keys unless an arch overrides it. */
int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
	return 0;
}

/* Weak default: arches may rewrite a column header (dynamic_headers). */
const char * __weak arch_perf_header_entry(const char *se_header)
{
	return se_header;
}

static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
{
	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
}

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT,
	    "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
#ifdef HAVE_LIBTRACEEVENT
	DIM(SORT_TRACE, "trace", sort_trace),
#endif
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
	DIM(SORT_ADDR, "addr", sort_addr),
	/* retire_lat keys reuse the p_stage_cyc entries */
	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
	DIM(SORT_SIMD, "simd", sort_simd),
	DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
	DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
	DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
	DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
	DIM(SORT_PARALLELISM, "parallelism", sort_parallelism),
};

#undef DIM

/* Branch-stack sort keys; indexed relative to __SORT_BRANCH_STACK. */
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
	DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
		"callchain_branch_predicted",
		sort_callchain_branch_predicted),
	DIM(SORT_CALLCHAIN_BRANCH_ABORT,
		"callchain_branch_abort",
		sort_callchain_branch_abort),
	DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
		"callchain_branch_cycles",
		sort_callchain_branch_cycles)
};

#undef DIM

/* Memory-mode sort keys; indexed relative to __SORT_MEMORY_MODE. */
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};

#undef DIM

/*
 * Maps a token to a perf_hpp_fmt (overhead/period style columns).
 * mem_mode entries are output-only columns for SORT_MODE__MEM.
 */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
	int			was_taken;
	int			mem_mode;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
#define DIM_MEM(d, n) { .name = n, .fmt = &perf_hpp__format[d], .mem_mode = 1, }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__LATENCY, "latency"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__LATENCY_ACC, "latency_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
	DIM(PERF_HPP__WEIGHT1, "weight1"),
	DIM(PERF_HPP__WEIGHT2, "weight2"),
	DIM(PERF_HPP__WEIGHT3, "weight3"),
	/* aliases for weight_struct */
	DIM(PERF_HPP__WEIGHT2, "ins_lat"),
	DIM(PERF_HPP__WEIGHT3, "retire_lat"),
	DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
	/* used for output only when SORT_MODE__MEM */
	DIM_MEM(PERF_HPP__MEM_STAT_OP, "op"),
	DIM_MEM(PERF_HPP__MEM_STAT_CACHE, "cache"),
	DIM_MEM(PERF_HPP__MEM_STAT_MEMORY, "memory"),
	DIM_MEM(PERF_HPP__MEM_STAT_SNOOP, "snoop"),
	DIM_MEM(PERF_HPP__MEM_STAT_DTLB, "dtlb"),
};

#undef DIM_MEM
#undef DIM

/* Adapter wrapping a sort_entry in the perf_hpp_fmt callback interface. */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

/* Print the column header; only the last header line shows the name. */
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;
	const char *hdr = "";

	if (line == hists->hpp_list->nr_header_lines - 1)
		hdr = fmt->name;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, hdr);
}

/* Column width: user override (user_len) or the tracked max width. */
static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

/* se_collapse is optional; fall back to se_cmp when absent. */
static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

/* se_sort is optional; fall back to se_cmp when absent. */
static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

/* A fmt is a sort entry iff it uses our header callback. */
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

/* Generate perf_hpp__is_<key>_entry() predicates for specific sort keys. */
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

#ifdef HAVE_LIBTRACEEVENT
MK_SORT_ENTRY_CHK(trace)
#else
bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
#endif
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
MK_SORT_ENTRY_CHK(parallelism)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (hse->se->se_init)
		hse->se->se_init(he);
}

/*
 * Allocate an hpp_sort_entry wrapping sd->entry; caller owns the result
 * (freed via hse_free through the hpp.free callback).
 */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name =
		sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;
	hse->hpp.init = hse_init;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

/* Duplicate the template fmt for hd; caller owns the copy (hpp_free). */
static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

/*
 * Run all se_filter callbacks of the entry's sort keys for the given
 * filter type.  Returns -1 when no filter applied, otherwise the OR of
 * the individual filter results (non-zero means filtered out).
 */
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied. But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list,
					    int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

#ifndef HAVE_LIBTRACEEVENT
/* Without libtraceevent there are no dynamic (tracepoint-field) entries. */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
				     struct hists *hists __maybe_unused)
{
	return false;
}
#else
/* A column backed by one tracepoint format field of one evsel. */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;	/* max value width seen so far */
	bool raw_trace;		/* print raw field instead of pretty text */
};

/* Column width: max of seen value width, field name and raw-hex width. */
static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct
		tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	/* scan "name=value" tokens for our field and track the value width */
	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

/* A dynamic entry is only meaningful for the evsel it was created for. */
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

/*
 * Print the field value: extracted from the pretty-printed trace output
 * when available, otherwise (raw mode or field not found) the raw field
 * via tep_print_field().  Both paths yield a heap string freed below.
 */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

/*
 * Compare raw field bytes.  Dynamic fields encode offset/size in the
 * recorded value (low/high 16 bits); relative fields add the field's
 * own offset+size.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;
		if (tep_field_is_relative(field->flags))
			offset += field->offset + field->size;
		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

/* Identified by the cmp callback, like perf_hpp__is_sort_entry(). */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;

	if (!perf_hpp__is_dynamic_entry(fmt))
		return;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	update_dynamic_len(hde, he);
}

/* Allocate a dynamic entry for (evsel, field); caller owns it (hde_free). */
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.init = __sort__hde_init;
	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
hde->hpp.free = hde_free; 3193 3194 INIT_LIST_HEAD(&hde->hpp.list); 3195 INIT_LIST_HEAD(&hde->hpp.sort_list); 3196 hde->hpp.elide = false; 3197 hde->hpp.len = 0; 3198 hde->hpp.user_len = 0; 3199 hde->hpp.level = level; 3200 3201 return hde; 3202 } 3203 #endif /* HAVE_LIBTRACEEVENT */ 3204 3205 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 3206 { 3207 struct perf_hpp_fmt *new_fmt = NULL; 3208 3209 if (perf_hpp__is_sort_entry(fmt)) { 3210 struct hpp_sort_entry *hse, *new_hse; 3211 3212 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3213 new_hse = memdup(hse, sizeof(*hse)); 3214 if (new_hse) 3215 new_fmt = &new_hse->hpp; 3216 #ifdef HAVE_LIBTRACEEVENT 3217 } else if (perf_hpp__is_dynamic_entry(fmt)) { 3218 struct hpp_dynamic_entry *hde, *new_hde; 3219 3220 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 3221 new_hde = memdup(hde, sizeof(*hde)); 3222 if (new_hde) 3223 new_fmt = &new_hde->hpp; 3224 #endif 3225 } else { 3226 new_fmt = memdup(fmt, sizeof(*fmt)); 3227 } 3228 3229 INIT_LIST_HEAD(&new_fmt->list); 3230 INIT_LIST_HEAD(&new_fmt->sort_list); 3231 3232 return new_fmt; 3233 } 3234 3235 static int parse_field_name(char *str, char **event, char **field, char **opt) 3236 { 3237 char *event_name, *field_name, *opt_name; 3238 3239 event_name = str; 3240 field_name = strchr(str, '.'); 3241 3242 if (field_name) { 3243 *field_name++ = '\0'; 3244 } else { 3245 event_name = NULL; 3246 field_name = str; 3247 } 3248 3249 opt_name = strchr(field_name, '/'); 3250 if (opt_name) 3251 *opt_name++ = '\0'; 3252 3253 *event = event_name; 3254 *field = field_name; 3255 *opt = opt_name; 3256 3257 return 0; 3258 } 3259 3260 /* find match evsel using a given event name. The event name can be: 3261 * 1. '%' + event index (e.g. '%1' for first event) 3262 * 2. full event name (e.g. sched:sched_switch) 3263 * 3. 
partial event name (should not contain ':') 3264 */ 3265 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 3266 { 3267 struct evsel *evsel = NULL; 3268 struct evsel *pos; 3269 bool full_name; 3270 3271 /* case 1 */ 3272 if (event_name[0] == '%') { 3273 int nr = strtol(event_name+1, NULL, 0); 3274 3275 if (nr > evlist->core.nr_entries) 3276 return NULL; 3277 3278 evsel = evlist__first(evlist); 3279 while (--nr > 0) 3280 evsel = evsel__next(evsel); 3281 3282 return evsel; 3283 } 3284 3285 full_name = !!strchr(event_name, ':'); 3286 evlist__for_each_entry(evlist, pos) { 3287 /* case 2 */ 3288 if (full_name && evsel__name_is(pos, event_name)) 3289 return pos; 3290 /* case 3 */ 3291 if (!full_name && strstr(pos->name, event_name)) { 3292 if (evsel) { 3293 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 3294 event_name, evsel->name, pos->name); 3295 return NULL; 3296 } 3297 evsel = pos; 3298 } 3299 } 3300 3301 return evsel; 3302 } 3303 3304 #ifdef HAVE_LIBTRACEEVENT 3305 static int __dynamic_dimension__add(struct evsel *evsel, 3306 struct tep_format_field *field, 3307 bool raw_trace, int level) 3308 { 3309 struct hpp_dynamic_entry *hde; 3310 3311 hde = __alloc_dynamic_entry(evsel, field, level); 3312 if (hde == NULL) 3313 return -ENOMEM; 3314 3315 hde->raw_trace = raw_trace; 3316 3317 perf_hpp__register_sort_field(&hde->hpp); 3318 return 0; 3319 } 3320 3321 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 3322 { 3323 int ret; 3324 struct tep_event *tp_format = evsel__tp_format(evsel); 3325 struct tep_format_field *field = tp_format ? 
tp_format->format.fields : NULL; 3326 while (field) { 3327 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3328 if (ret < 0) 3329 return ret; 3330 3331 field = field->next; 3332 } 3333 return 0; 3334 } 3335 3336 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 3337 int level) 3338 { 3339 int ret; 3340 struct evsel *evsel; 3341 3342 evlist__for_each_entry(evlist, evsel) { 3343 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3344 continue; 3345 3346 ret = add_evsel_fields(evsel, raw_trace, level); 3347 if (ret < 0) 3348 return ret; 3349 } 3350 return 0; 3351 } 3352 3353 static int add_all_matching_fields(struct evlist *evlist, 3354 char *field_name, bool raw_trace, int level) 3355 { 3356 int ret = -ESRCH; 3357 struct evsel *evsel; 3358 3359 evlist__for_each_entry(evlist, evsel) { 3360 struct tep_event *tp_format; 3361 struct tep_format_field *field; 3362 3363 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3364 continue; 3365 3366 tp_format = evsel__tp_format(evsel); 3367 if (tp_format == NULL) 3368 continue; 3369 3370 field = tep_find_any_field(tp_format, field_name); 3371 if (field == NULL) 3372 continue; 3373 3374 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3375 if (ret < 0) 3376 break; 3377 } 3378 return ret; 3379 } 3380 #endif /* HAVE_LIBTRACEEVENT */ 3381 3382 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 3383 int level) 3384 { 3385 char *str, *event_name, *field_name, *opt_name; 3386 struct evsel *evsel; 3387 bool raw_trace = symbol_conf.raw_trace; 3388 int ret = 0; 3389 3390 if (evlist == NULL) 3391 return -ENOENT; 3392 3393 str = strdup(tok); 3394 if (str == NULL) 3395 return -ENOMEM; 3396 3397 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 3398 ret = -EINVAL; 3399 goto out; 3400 } 3401 3402 if (opt_name) { 3403 if (strcmp(opt_name, "raw")) { 3404 pr_debug("unsupported field option %s\n", opt_name); 3405 ret = -EINVAL; 3406 goto out; 3407 } 3408 
raw_trace = true; 3409 } 3410 3411 #ifdef HAVE_LIBTRACEEVENT 3412 if (!strcmp(field_name, "trace_fields")) { 3413 ret = add_all_dynamic_fields(evlist, raw_trace, level); 3414 goto out; 3415 } 3416 3417 if (event_name == NULL) { 3418 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 3419 goto out; 3420 } 3421 #else 3422 evlist__for_each_entry(evlist, evsel) { 3423 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 3424 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel)); 3425 ret = -ENOTSUP; 3426 } 3427 } 3428 3429 if (ret) { 3430 pr_err("\n"); 3431 goto out; 3432 } 3433 #endif 3434 3435 evsel = find_evsel(evlist, event_name); 3436 if (evsel == NULL) { 3437 pr_debug("Cannot find event: %s\n", event_name); 3438 ret = -ENOENT; 3439 goto out; 3440 } 3441 3442 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3443 pr_debug("%s is not a tracepoint event\n", event_name); 3444 ret = -EINVAL; 3445 goto out; 3446 } 3447 3448 #ifdef HAVE_LIBTRACEEVENT 3449 if (!strcmp(field_name, "*")) { 3450 ret = add_evsel_fields(evsel, raw_trace, level); 3451 } else { 3452 struct tep_event *tp_format = evsel__tp_format(evsel); 3453 struct tep_format_field *field = 3454 tp_format ? 
tep_find_any_field(tp_format, field_name) : NULL; 3455 3456 if (field == NULL) { 3457 pr_debug("Cannot find event field for %s.%s\n", 3458 event_name, field_name); 3459 return -ENOENT; 3460 } 3461 3462 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3463 } 3464 #else 3465 (void)level; 3466 (void)raw_trace; 3467 #endif /* HAVE_LIBTRACEEVENT */ 3468 3469 out: 3470 free(str); 3471 return ret; 3472 } 3473 3474 static int __sort_dimension__add(struct sort_dimension *sd, 3475 struct perf_hpp_list *list, 3476 int level) 3477 { 3478 if (sd->taken) 3479 return 0; 3480 3481 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 3482 return -1; 3483 3484 if (sd->entry->se_collapse) 3485 list->need_collapse = 1; 3486 3487 sd->taken = 1; 3488 3489 return 0; 3490 } 3491 3492 static int __hpp_dimension__add(struct hpp_dimension *hd, 3493 struct perf_hpp_list *list, 3494 int level) 3495 { 3496 struct perf_hpp_fmt *fmt; 3497 3498 if (hd->taken) 3499 return 0; 3500 3501 fmt = __hpp_dimension__alloc_hpp(hd, level); 3502 if (!fmt) 3503 return -1; 3504 3505 hd->taken = 1; 3506 hd->was_taken = 1; 3507 perf_hpp_list__register_sort_field(list, fmt); 3508 return 0; 3509 } 3510 3511 static int __sort_dimension__add_output(struct perf_hpp_list *list, 3512 struct sort_dimension *sd, 3513 int level) 3514 { 3515 if (sd->taken) 3516 return 0; 3517 3518 if (__sort_dimension__add_hpp_output(sd, list, level) < 0) 3519 return -1; 3520 3521 sd->taken = 1; 3522 return 0; 3523 } 3524 3525 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 3526 struct hpp_dimension *hd, 3527 int level) 3528 { 3529 struct perf_hpp_fmt *fmt; 3530 3531 if (hd->taken) 3532 return 0; 3533 3534 fmt = __hpp_dimension__alloc_hpp(hd, level); 3535 if (!fmt) 3536 return -1; 3537 3538 hd->taken = 1; 3539 perf_hpp_list__column_register(list, fmt); 3540 return 0; 3541 } 3542 3543 int hpp_dimension__add_output(unsigned col, bool implicit) 3544 { 3545 struct hpp_dimension *hd; 3546 3547 BUG_ON(col >= 
PERF_HPP__MAX_INDEX); 3548 hd = &hpp_sort_dimensions[col]; 3549 if (implicit && !hd->was_taken) 3550 return 0; 3551 return __hpp_dimension__add_output(&perf_hpp_list, hd, /*level=*/0); 3552 } 3553 3554 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 3555 struct evlist *evlist, 3556 int level) 3557 { 3558 unsigned int i, j; 3559 3560 /* 3561 * Check to see if there are any arch specific 3562 * sort dimensions not applicable for the current 3563 * architecture. If so, Skip that sort key since 3564 * we don't want to display it in the output fields. 3565 */ 3566 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) { 3567 if (!strcmp(arch_specific_sort_keys[j], tok) && 3568 !arch_support_sort_key(tok)) { 3569 return 0; 3570 } 3571 } 3572 3573 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3574 struct sort_dimension *sd = &common_sort_dimensions[i]; 3575 3576 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3577 continue; 3578 3579 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) { 3580 if (sd->name && !strcmp(dynamic_headers[j], sd->name)) 3581 sort_dimension_add_dynamic_header(sd); 3582 } 3583 3584 if (sd->entry == &sort_parent && parent_pattern) { 3585 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 3586 if (ret) { 3587 char err[BUFSIZ]; 3588 3589 regerror(ret, &parent_regex, err, sizeof(err)); 3590 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 3591 return -EINVAL; 3592 } 3593 list->parent = 1; 3594 } else if (sd->entry == &sort_sym) { 3595 list->sym = 1; 3596 /* 3597 * perf diff displays the performance difference amongst 3598 * two or more perf.data files. Those files could come 3599 * from different binaries. So we should not compare 3600 * their ips, but the name of symbol. 
3601 */ 3602 if (sort__mode == SORT_MODE__DIFF) 3603 sd->entry->se_collapse = sort__sym_sort; 3604 3605 } else if (sd->entry == &sort_dso) { 3606 list->dso = 1; 3607 } else if (sd->entry == &sort_socket) { 3608 list->socket = 1; 3609 } else if (sd->entry == &sort_thread) { 3610 list->thread = 1; 3611 } else if (sd->entry == &sort_comm) { 3612 list->comm = 1; 3613 } else if (sd->entry == &sort_type_offset) { 3614 symbol_conf.annotate_data_member = true; 3615 } 3616 3617 return __sort_dimension__add(sd, list, level); 3618 } 3619 3620 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3621 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3622 3623 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3624 continue; 3625 3626 if ((sort__mode != SORT_MODE__BRANCH) && 3627 strncasecmp(tok, "callchain_branch_predicted", 3628 strlen(tok)) && 3629 strncasecmp(tok, "callchain_branch_abort", 3630 strlen(tok)) && 3631 strncasecmp(tok, "callchain_branch_cycles", 3632 strlen(tok))) 3633 return -EINVAL; 3634 3635 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 3636 list->sym = 1; 3637 3638 __sort_dimension__add(sd, list, level); 3639 return 0; 3640 } 3641 3642 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3643 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3644 3645 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3646 continue; 3647 3648 if (sort__mode != SORT_MODE__MEMORY) 3649 return -EINVAL; 3650 3651 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 3652 return -EINVAL; 3653 3654 if (sd->entry == &sort_mem_daddr_sym) 3655 list->sym = 1; 3656 3657 __sort_dimension__add(sd, list, level); 3658 return 0; 3659 } 3660 3661 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3662 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3663 3664 if (strncasecmp(tok, hd->name, strlen(tok))) 3665 continue; 3666 3667 return __hpp_dimension__add(hd, list, level); 3668 } 3669 3670 if (!add_dynamic_entry(evlist, 
tok, level)) 3671 return 0; 3672 3673 return -ESRCH; 3674 } 3675 3676 /* This should match with sort_dimension__add() above */ 3677 static bool is_hpp_sort_key(const char *key) 3678 { 3679 unsigned i; 3680 3681 for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) { 3682 if (!strcmp(arch_specific_sort_keys[i], key) && 3683 !arch_support_sort_key(key)) { 3684 return false; 3685 } 3686 } 3687 3688 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3689 struct sort_dimension *sd = &common_sort_dimensions[i]; 3690 3691 if (sd->name && !strncasecmp(key, sd->name, strlen(key))) 3692 return false; 3693 } 3694 3695 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3696 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3697 3698 if (!strncasecmp(key, hd->name, strlen(key))) 3699 return true; 3700 } 3701 return false; 3702 } 3703 3704 static int setup_sort_list(struct perf_hpp_list *list, char *str, 3705 struct evlist *evlist) 3706 { 3707 char *tmp, *tok; 3708 int ret = 0; 3709 int level = 0; 3710 int next_level = 1; 3711 int prev_level = 0; 3712 bool in_group = false; 3713 bool prev_was_hpp = false; 3714 3715 do { 3716 tok = str; 3717 tmp = strpbrk(str, "{}, "); 3718 if (tmp) { 3719 if (in_group) 3720 next_level = level; 3721 else 3722 next_level = level + 1; 3723 3724 if (*tmp == '{') 3725 in_group = true; 3726 else if (*tmp == '}') 3727 in_group = false; 3728 3729 *tmp = '\0'; 3730 str = tmp + 1; 3731 } 3732 3733 if (*tok) { 3734 if (is_hpp_sort_key(tok)) { 3735 /* keep output (hpp) sort keys in the same level */ 3736 if (prev_was_hpp) { 3737 bool next_same = (level == next_level); 3738 3739 level = prev_level; 3740 next_level = next_same ? 
level : level+1; 3741 } 3742 prev_was_hpp = true; 3743 } else { 3744 prev_was_hpp = false; 3745 } 3746 3747 ret = sort_dimension__add(list, tok, evlist, level); 3748 if (ret == -EINVAL) { 3749 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 3750 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 3751 else 3752 ui__error("Invalid --sort key: `%s'", tok); 3753 break; 3754 } else if (ret == -ESRCH) { 3755 ui__error("Unknown --sort key: `%s'", tok); 3756 break; 3757 } 3758 prev_level = level; 3759 } 3760 3761 level = next_level; 3762 } while (tmp); 3763 3764 return ret; 3765 } 3766 3767 static const char *get_default_sort_order(struct evlist *evlist) 3768 { 3769 const char *default_sort_orders[] = { 3770 default_sort_order, 3771 default_branch_sort_order, 3772 default_mem_sort_order, 3773 default_top_sort_order, 3774 default_diff_sort_order, 3775 default_tracepoint_sort_order, 3776 }; 3777 bool use_trace = true; 3778 struct evsel *evsel; 3779 3780 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 3781 3782 if (evlist == NULL || evlist__empty(evlist)) 3783 goto out_no_evlist; 3784 3785 evlist__for_each_entry(evlist, evsel) { 3786 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3787 use_trace = false; 3788 break; 3789 } 3790 } 3791 3792 if (use_trace) { 3793 sort__mode = SORT_MODE__TRACEPOINT; 3794 if (symbol_conf.raw_trace) 3795 return "trace_fields"; 3796 } 3797 out_no_evlist: 3798 return default_sort_orders[sort__mode]; 3799 } 3800 3801 static int setup_sort_order(struct evlist *evlist) 3802 { 3803 char *new_sort_order; 3804 3805 /* 3806 * Append '+'-prefixed sort order to the default sort 3807 * order string. 
3808 */ 3809 if (!sort_order || is_strict_order(sort_order)) 3810 return 0; 3811 3812 if (sort_order[1] == '\0') { 3813 ui__error("Invalid --sort key: `+'"); 3814 return -EINVAL; 3815 } 3816 3817 /* 3818 * We allocate new sort_order string, but we never free it, 3819 * because it's checked over the rest of the code. 3820 */ 3821 if (asprintf(&new_sort_order, "%s,%s", 3822 get_default_sort_order(evlist), sort_order + 1) < 0) { 3823 pr_err("Not enough memory to set up --sort"); 3824 return -ENOMEM; 3825 } 3826 3827 sort_order = new_sort_order; 3828 return 0; 3829 } 3830 3831 /* 3832 * Adds 'pre,' prefix into 'str' is 'pre' is 3833 * not already part of 'str'. 3834 */ 3835 static char *prefix_if_not_in(const char *pre, char *str) 3836 { 3837 char *n; 3838 3839 if (!str || strstr(str, pre)) 3840 return str; 3841 3842 if (asprintf(&n, "%s,%s", pre, str) < 0) 3843 n = NULL; 3844 3845 free(str); 3846 return n; 3847 } 3848 3849 static char *setup_overhead(char *keys) 3850 { 3851 if (sort__mode == SORT_MODE__DIFF) 3852 return keys; 3853 3854 if (symbol_conf.prefer_latency) { 3855 keys = prefix_if_not_in("overhead", keys); 3856 keys = prefix_if_not_in("latency", keys); 3857 if (symbol_conf.cumulate_callchain) { 3858 keys = prefix_if_not_in("overhead_children", keys); 3859 keys = prefix_if_not_in("latency_children", keys); 3860 } 3861 } else if (!keys || (!strstr(keys, "overhead") && 3862 !strstr(keys, "latency"))) { 3863 if (symbol_conf.enable_latency) 3864 keys = prefix_if_not_in("latency", keys); 3865 keys = prefix_if_not_in("overhead", keys); 3866 if (symbol_conf.cumulate_callchain) { 3867 if (symbol_conf.enable_latency) 3868 keys = prefix_if_not_in("latency_children", keys); 3869 keys = prefix_if_not_in("overhead_children", keys); 3870 } 3871 } 3872 3873 return keys; 3874 } 3875 3876 static int __setup_sorting(struct evlist *evlist) 3877 { 3878 char *str; 3879 const char *sort_keys; 3880 int ret = 0; 3881 3882 ret = setup_sort_order(evlist); 3883 if (ret) 3884 return 
ret; 3885 3886 sort_keys = sort_order; 3887 if (sort_keys == NULL) { 3888 if (is_strict_order(field_order)) { 3889 /* 3890 * If user specified field order but no sort order, 3891 * we'll honor it and not add default sort orders. 3892 */ 3893 return 0; 3894 } 3895 3896 sort_keys = get_default_sort_order(evlist); 3897 } 3898 3899 str = strdup(sort_keys); 3900 if (str == NULL) { 3901 pr_err("Not enough memory to setup sort keys"); 3902 return -ENOMEM; 3903 } 3904 3905 /* 3906 * Prepend overhead fields for backward compatibility. 3907 */ 3908 if (!is_strict_order(field_order)) { 3909 str = setup_overhead(str); 3910 if (str == NULL) { 3911 pr_err("Not enough memory to setup overhead keys"); 3912 return -ENOMEM; 3913 } 3914 } 3915 3916 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3917 3918 free(str); 3919 return ret; 3920 } 3921 3922 void perf_hpp__set_elide(int idx, bool elide) 3923 { 3924 struct perf_hpp_fmt *fmt; 3925 struct hpp_sort_entry *hse; 3926 3927 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3928 if (!perf_hpp__is_sort_entry(fmt)) 3929 continue; 3930 3931 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3932 if (hse->se->se_width_idx == idx) { 3933 fmt->elide = elide; 3934 break; 3935 } 3936 } 3937 } 3938 3939 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3940 { 3941 if (list && strlist__nr_entries(list) == 1) { 3942 if (fp != NULL) 3943 fprintf(fp, "# %s: %s\n", list_name, 3944 strlist__entry(list, 0)->s); 3945 return true; 3946 } 3947 return false; 3948 } 3949 3950 static bool get_elide(int idx, FILE *output) 3951 { 3952 switch (idx) { 3953 case HISTC_SYMBOL: 3954 return __get_elide(symbol_conf.sym_list, "symbol", output); 3955 case HISTC_DSO: 3956 return __get_elide(symbol_conf.dso_list, "dso", output); 3957 case HISTC_COMM: 3958 return __get_elide(symbol_conf.comm_list, "comm", output); 3959 default: 3960 break; 3961 } 3962 3963 if (sort__mode != SORT_MODE__BRANCH) 3964 return false; 3965 3966 switch 
(idx) { 3967 case HISTC_SYMBOL_FROM: 3968 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3969 case HISTC_SYMBOL_TO: 3970 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3971 case HISTC_DSO_FROM: 3972 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3973 case HISTC_DSO_TO: 3974 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3975 case HISTC_ADDR_FROM: 3976 return __get_elide(symbol_conf.sym_from_list, "addr_from", output); 3977 case HISTC_ADDR_TO: 3978 return __get_elide(symbol_conf.sym_to_list, "addr_to", output); 3979 default: 3980 break; 3981 } 3982 3983 return false; 3984 } 3985 3986 void sort__setup_elide(FILE *output) 3987 { 3988 struct perf_hpp_fmt *fmt; 3989 struct hpp_sort_entry *hse; 3990 3991 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3992 if (!perf_hpp__is_sort_entry(fmt)) 3993 continue; 3994 3995 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3996 fmt->elide = get_elide(hse->se->se_width_idx, output); 3997 } 3998 3999 /* 4000 * It makes no sense to elide all of sort entries. 4001 * Just revert them to show up again. 
4002 */ 4003 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 4004 if (!perf_hpp__is_sort_entry(fmt)) 4005 continue; 4006 4007 if (!fmt->elide) 4008 return; 4009 } 4010 4011 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 4012 if (!perf_hpp__is_sort_entry(fmt)) 4013 continue; 4014 4015 fmt->elide = false; 4016 } 4017 } 4018 4019 int output_field_add(struct perf_hpp_list *list, const char *tok, int *level) 4020 { 4021 unsigned int i; 4022 4023 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 4024 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 4025 4026 if (strncasecmp(tok, hd->name, strlen(tok))) 4027 continue; 4028 4029 if (!strcasecmp(tok, "weight")) 4030 ui__warning("--fields weight shows the average value unlike in the --sort key.\n"); 4031 4032 if (hd->mem_mode && sort__mode != SORT_MODE__MEMORY) 4033 continue; 4034 4035 return __hpp_dimension__add_output(list, hd, *level); 4036 } 4037 4038 /* 4039 * A non-output field will increase level so that it can be in a 4040 * different hierarchy. 
4041 */ 4042 (*level)++; 4043 4044 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 4045 struct sort_dimension *sd = &common_sort_dimensions[i]; 4046 4047 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 4048 continue; 4049 4050 return __sort_dimension__add_output(list, sd, *level); 4051 } 4052 4053 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 4054 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 4055 4056 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 4057 continue; 4058 4059 if (sort__mode != SORT_MODE__BRANCH) 4060 return -EINVAL; 4061 4062 return __sort_dimension__add_output(list, sd, *level); 4063 } 4064 4065 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 4066 struct sort_dimension *sd = &memory_sort_dimensions[i]; 4067 4068 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 4069 continue; 4070 4071 if (sort__mode != SORT_MODE__MEMORY) 4072 return -EINVAL; 4073 4074 return __sort_dimension__add_output(list, sd, *level); 4075 } 4076 4077 return -ESRCH; 4078 } 4079 4080 static int setup_output_list(struct perf_hpp_list *list, char *str) 4081 { 4082 char *tmp, *tok; 4083 int ret = 0; 4084 int level = 0; 4085 4086 for (tok = strtok_r(str, ", ", &tmp); 4087 tok; tok = strtok_r(NULL, ", ", &tmp)) { 4088 ret = output_field_add(list, tok, &level); 4089 if (ret == -EINVAL) { 4090 ui__error("Invalid --fields key: `%s'", tok); 4091 break; 4092 } else if (ret == -ESRCH) { 4093 ui__error("Unknown --fields key: `%s'", tok); 4094 break; 4095 } 4096 } 4097 4098 return ret; 4099 } 4100 4101 void reset_dimensions(void) 4102 { 4103 unsigned int i; 4104 4105 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 4106 common_sort_dimensions[i].taken = 0; 4107 4108 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 4109 hpp_sort_dimensions[i].taken = 0; 4110 4111 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 4112 bstack_sort_dimensions[i].taken = 0; 4113 4114 for (i = 0; i < 
ARRAY_SIZE(memory_sort_dimensions); i++) 4115 memory_sort_dimensions[i].taken = 0; 4116 } 4117 4118 bool is_strict_order(const char *order) 4119 { 4120 return order && (*order != '+'); 4121 } 4122 4123 static int __setup_output_field(void) 4124 { 4125 char *str, *strp; 4126 int ret = -EINVAL; 4127 4128 if (field_order == NULL) 4129 return 0; 4130 4131 strp = str = strdup(field_order); 4132 if (str == NULL) { 4133 pr_err("Not enough memory to setup output fields"); 4134 return -ENOMEM; 4135 } 4136 4137 if (!is_strict_order(field_order)) 4138 strp++; 4139 4140 if (!strlen(strp)) { 4141 ui__error("Invalid --fields key: `+'"); 4142 goto out; 4143 } 4144 4145 ret = setup_output_list(&perf_hpp_list, strp); 4146 4147 out: 4148 free(str); 4149 return ret; 4150 } 4151 4152 int setup_sorting(struct evlist *evlist) 4153 { 4154 int err; 4155 4156 err = __setup_sorting(evlist); 4157 if (err < 0) 4158 return err; 4159 4160 if (parent_pattern != default_parent_pattern) { 4161 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 4162 if (err < 0) 4163 return err; 4164 } 4165 4166 reset_dimensions(); 4167 4168 /* 4169 * perf diff doesn't use default hpp output fields. 
4170 */ 4171 if (sort__mode != SORT_MODE__DIFF) 4172 perf_hpp__init(); 4173 4174 err = __setup_output_field(); 4175 if (err < 0) 4176 return err; 4177 4178 err = perf_hpp__alloc_mem_stats(&perf_hpp_list, evlist); 4179 if (err < 0) 4180 return err; 4181 4182 /* copy sort keys to output fields */ 4183 perf_hpp__setup_output_field(&perf_hpp_list); 4184 /* and then copy output fields to sort keys */ 4185 perf_hpp__append_sort_keys(&perf_hpp_list); 4186 4187 /* setup hists-specific output fields */ 4188 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 4189 return -1; 4190 4191 return 0; 4192 } 4193 4194 void reset_output_field(void) 4195 { 4196 perf_hpp_list.need_collapse = 0; 4197 perf_hpp_list.parent = 0; 4198 perf_hpp_list.sym = 0; 4199 perf_hpp_list.dso = 0; 4200 4201 field_order = NULL; 4202 sort_order = NULL; 4203 4204 reset_dimensions(); 4205 perf_hpp__reset_output_field(&perf_hpp_list); 4206 } 4207 4208 #define INDENT (3*8 + 1) 4209 4210 static void add_key(struct strbuf *sb, const char *str, int *llen) 4211 { 4212 if (!str) 4213 return; 4214 4215 if (*llen >= 75) { 4216 strbuf_addstr(sb, "\n\t\t\t "); 4217 *llen = INDENT; 4218 } 4219 strbuf_addf(sb, " %s", str); 4220 *llen += strlen(str) + 1; 4221 } 4222 4223 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 4224 int *llen) 4225 { 4226 int i; 4227 4228 for (i = 0; i < n; i++) 4229 add_key(sb, s[i].name, llen); 4230 } 4231 4232 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 4233 int *llen) 4234 { 4235 int i; 4236 4237 for (i = 0; i < n; i++) 4238 add_key(sb, s[i].name, llen); 4239 } 4240 4241 char *sort_help(const char *prefix, enum sort_mode mode) 4242 { 4243 struct strbuf sb; 4244 char *s; 4245 int len = strlen(prefix) + INDENT; 4246 4247 strbuf_init(&sb, 300); 4248 strbuf_addstr(&sb, prefix); 4249 add_hpp_sort_string(&sb, hpp_sort_dimensions, 4250 ARRAY_SIZE(hpp_sort_dimensions), &len); 4251 add_sort_string(&sb, 
common_sort_dimensions, 4252 ARRAY_SIZE(common_sort_dimensions), &len); 4253 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH) 4254 add_sort_string(&sb, bstack_sort_dimensions, 4255 ARRAY_SIZE(bstack_sort_dimensions), &len); 4256 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY) 4257 add_sort_string(&sb, memory_sort_dimensions, 4258 ARRAY_SIZE(memory_sort_dimensions), &len); 4259 s = strbuf_detach(&sb, NULL); 4260 strbuf_release(&sb); 4261 return s; 4262 } 4263