// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "mem-info.h"
#include "annotate.h"
#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have Adjacent Cacheline Prefetch feature, which
 * behaves like the cacheline size is doubled. Enable this flag to
 * check things in double cacheline granularity.
 */
bool chk_double_cl;

/*
 * Replaces all occurrences of the character selected with the:
 *
 *   -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad
 * with spaces. Every occurrence of that separator in symbol names
 * (and other output) is replaced with a '.' character, so that the
 * separator never appears inside a field.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return thread__tid(right->thread) - thread__tid(left->thread);
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && !RC_CHK_EQUAL(he->thread, th);
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort simd */

static int64_t
sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (left->simd_flags.arch != right->simd_flags.arch)
		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;

	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
}

static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
{
	u64 arch = simd_flags->arch;

	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
		return "SVE";
	else
		return "n/a";
}

static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
{
	const char *name;

	if (!he->simd_flags.arch)
		return repsep_snprintf(bf, size, "");

	name = hist_entry__get_simd_name(&he->simd_flags);

	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
		return repsep_snprintf(bf, size, "[e] %s", name);
	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
		return repsep_snprintf(bf, size, "[p] %s", name);

	return repsep_snprintf(bf, size, "[.] %s", name);
}

struct sort_entry sort_simd = {
	.se_header	= "Simd ",
	.se_cmp		= sort__simd_cmp,
	.se_snprintf	= hist_entry__simd_snprintf,
	.se_width_idx	= HISTC_SIMD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso__long_name(dso_l);
		dso_name_r = dso__long_name(dso_r);
	} else {
		dso_name_l = dso__short_name(dso_l);
		dso_name_r = dso__short_name(dso_r);
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	const char *dso_name = "[unknown]";

	if (dso)
		dso_name = verbose > 0 ?
			dso__long_name(dso) : dso__short_name(dso);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		struct dso *dso = map ? map__dso(map) : NULL;
		char o = dso ?
			dso__symtab_origin(dso) : '!';
		u64 rip = ip;

		if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
			rip = map__unmap_ip(map, ip);

		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, rip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort symoff */

static int64_t
sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_cmp(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int64_t
sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_sort(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int
hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;

	if (sym == NULL)
		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);

	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
}

struct sort_entry sort_sym_offset = {
	.se_header	= "Symbol Offset",
	.se_cmp		= sort__symoff_cmp,
	.se_sort	= sort__symoff_sort,
	.se_snprintf	= hist_entry__symoff_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL_OFFSET,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__addr_cmp(left->ip, right->ip);
	if (ret)
		return ret;

	return sort__dso_cmp(left, right);
}

static int64_t
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int64_t
sort__srcline_sort(struct hist_entry *left,
		   struct hist_entry *right)
{
	return sort__srcline_collapse(left, right);
}

static void
sort__srcline_init(struct hist_entry *he)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_collapse	= sort__srcline_collapse,
	.se_sort	= sort__srcline_sort,
	.se_init	= sort__srcline_init,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->from.addr - right->branch_info->from.addr;
}

static int64_t
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int64_t
sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_from_collapse(left, right);
}

static void sort__srcline_from_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_from)
		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header	= "From Source:Line",
	.se_cmp		= sort__srcline_from_cmp,
	.se_collapse	= sort__srcline_from_collapse,
	.se_sort	= sort__srcline_from_sort,
	.se_init	= sort__srcline_from_init,
	.se_snprintf	= hist_entry__srcline_from_snprintf,
	.se_width_idx	= HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->to.addr - right->branch_info->to.addr;
}

static int64_t
sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int64_t
sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_to_collapse(left, right);
}

static void sort__srcline_to_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_to)
		he->branch_info->srcline_to =
			addr_map_symbol__srcline(&he->branch_info->to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header	= "To Source:Line",
	.se_cmp		= sort__srcline_to_cmp,
	.se_collapse	= sort__srcline_to_collapse,
	.se_sort	= sort__srcline_to_sort,
	.se_init	= sort__srcline_to_init,
	.se_snprintf	= hist_entry__srcline_to_snprintf,
	.se_width_idx	= HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{

	struct symbol *sym = he->ms.sym;
	struct annotated_branch *branch;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	branch = symbol__annotation(sym)->branch;

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header	= "IPC [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header	= "IPC [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (sf == SRCLINE_UNKNOWN)
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_cmp(left, right);
}

static int64_t
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int64_t
sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcfile_collapse(left, right);
}

static void sort__srcfile_init(struct hist_entry *he)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_collapse	= sort__srcfile_collapse,
	.se_sort	= sort__srcfile_sort,
	.se_init	= sort__srcfile_init,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header	= "CPU",
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header	= "cgroup id (dev/inode)",
	.se_cmp		= sort__cgroup_id_cmp,
	.se_snprintf	= hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
};

/* --sort cgroup */

static int64_t
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cgroup - left->cgroup;
}

static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
{
	const char *cgrp_name = "N/A";

	if (he->cgroup) {
		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
						   he->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;
		else
			cgrp_name = "unknown";
	}

	return repsep_snprintf(bf, size, "%s", cgrp_name);
}

struct sort_entry sort_cgroup = {
	.se_header	= "Cgroup",
	.se_cmp		= sort__cgroup_cmp,
	.se_snprintf	= hist_entry__cgroup_snprintf,
	.se_width_idx	= HISTC_CGROUP,
};

/* --sort socket */

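/*
 * Compare/print the processor socket id the sample was recorded on.
 * hist_entry__socket_filter() returns non-zero (i.e. filter the entry out)
 * only when a socket filter is active (sk >= 0) and the entry was sampled
 * on a different socket.
 */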
static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header	= "Time",
	.se_cmp		= sort__time_cmp,
	.se_snprintf	= hist_entry__time_snprintf,
	.se_width_idx	= HISTC_TIME,
};

/* --sort trace */

#ifdef HAVE_LIBTRACEEVENT
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};
#endif /* HAVE_LIBTRACEEVENT */

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       map__dso(he->branch_info->from.ms.map) != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       map__dso(he->branch_info->to.ms.map) != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l
		= &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
						 from->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
						 to->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
				      unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret +=
		repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(to_l->addr, to_r->addr);
}

struct sort_entry sort_addr_from = {
	.se_header	= "Source Address",
	.se_cmp		= sort__addr_from_cmp,
	.se_snprintf	= hist_entry__addr_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx	= HISTC_ADDR_FROM,
};

struct sort_entry sort_addr_to = {
	.se_header	= "Target Address",
	.se_cmp		= sort__addr_to_cmp,
	.se_snprintf	= hist_entry__addr_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx	= HISTC_ADDR_TO,
};


static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->addr;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__daddr(he->mem_info)->addr;
		ms = &mem_info__daddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__iaddr(left->mem_info)->addr;
	if (right->mem_info)
		r =
			mem_info__iaddr(right->mem_info)->addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__iaddr(he->mem_info)->addr;
		ms = &mem_info__iaddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = mem_info__daddr(left->mem_info)->ms.map;
	if (right->mem_info)
		map_r = mem_info__daddr(right->mem_info)->ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = mem_info__daddr(he->mem_info)->ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out,
				sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	struct dso *l_dso, *r_dso;
	int rc;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = mem_info__daddr(left->mem_info)->ms.map;
	r_map = mem_info__daddr(right->mem_info)->ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	l_dso = map__dso(l_map);
	r_dso = map__dso(r_map);
	rc = dso__cmp_id(l_dso, r_dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
	     !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
		/* userspace anonymous */

		if (thread__pid(left->thread) > thread__pid(right->thread))
			return -1;
		if (thread__pid(left->thread) < thread__pid(right->thread))
			return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
	r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = mem_info__daddr(he->mem_info)->ms.map;
		struct dso *dso = map ?
			map__dso(map) : NULL;

		addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
		ms = &mem_info__daddr(he->mem_info)->ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map__prot(map) & PROT_EXEC) &&
		    (map__flags(map) & MAP_SHARED) &&
		    (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
		     dso__id(dso)->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight - right->weight;
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width,
			       he->weight * he->stat.nr_events);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->ins_lat - right->ins_lat;
}

static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}

struct sort_entry sort_local_ins_lat = {
	.se_header	= "Local INSTR Latency",
	.se_cmp		= sort__ins_lat_cmp,
	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
	.se_width_idx	= HISTC_LOCAL_INS_LAT,
};

static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->ins_lat * he->stat.nr_events);
}

struct sort_entry sort_global_ins_lat = {
	.se_header	= "INSTR Latency",
	.se_cmp		= sort__ins_lat_cmp,
	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
};

static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->p_stage_cyc - right->p_stage_cyc;
}

static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
						   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->p_stage_cyc * he->stat.nr_events);
}


static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
}

struct
sort_entry sort_local_p_stage_cyc = {
	.se_header	= "Local Pipeline Stage Cycle",
	.se_cmp		= sort__p_stage_cyc_cmp,
	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
};

struct sort_entry sort_global_p_stage_cyc = {
	.se_header	= "Pipeline Stage Cycle",
	.se_cmp		= sort__p_stage_cyc_cmp,
	.se_snprintf	= hist_entry__global_p_stage_cyc_snprintf,
	.se_width_idx	= HISTC_GLOBAL_P_STAGE_CYC,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_blk = PERF_MEM_BLK_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_blk = PERF_MEM_BLK_NA;

	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
}

static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	char out[16];

	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

struct sort_entry sort_mem_blocked = {
	.se_header	= "Blocked",
	.se_cmp		= sort__blocked_cmp,
	.se_snprintf	= hist_entry__blocked_snprintf,
	.se_width_idx	= HISTC_MEM_BLOCKED,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->phys_addr;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->phys_addr;

	return (int64_t)(r - l);
}

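/*
 * Print the physical data address as "[level] 0xADDR", padded out to the
 * column width.  Note that, unlike hist_entry__daddr_snprintf() above, this
 * dereferences he->mem_info without a NULL check.
 */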
static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = mem_info__daddr(he->mem_info)->phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header	= "Data Physical Address",
	.se_cmp		= sort__phys_daddr_cmp,
	.se_snprintf	= hist_entry__phys_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->data_page_size;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->data_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
}

struct sort_entry sort_mem_data_page_size = {
	.se_header	= "Data Page Size",
	.se_cmp		= sort__data_page_size_cmp,
	.se_snprintf	= hist_entry__data_page_size_snprintf,
	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
};

static int64_t
sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = left->code_page_size;
	uint64_t r = right->code_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(he->code_page_size, str));
}

struct sort_entry sort_code_page_size = {
	.se_header	= "Code Page Size",
	.se_cmp		= sort__code_page_size_cmp,
	.se_snprintf	= hist_entry__code_page_size_snprintf,
	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return
cmp_null(left->branch_info, right->branch_info); 1969 1970 return left->branch_info->flags.in_tx != 1971 right->branch_info->flags.in_tx; 1972 } 1973 1974 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1975 size_t size, unsigned int width) 1976 { 1977 static const char *out = "N/A"; 1978 1979 if (he->branch_info) { 1980 if (he->branch_info->flags.in_tx) 1981 out = "T"; 1982 else 1983 out = "."; 1984 } 1985 1986 return repsep_snprintf(bf, size, "%-*s", width, out); 1987 } 1988 1989 struct sort_entry sort_in_tx = { 1990 .se_header = "Branch in transaction", 1991 .se_cmp = sort__in_tx_cmp, 1992 .se_snprintf = hist_entry__in_tx_snprintf, 1993 .se_width_idx = HISTC_IN_TX, 1994 }; 1995 1996 static int64_t 1997 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1998 { 1999 return left->transaction - right->transaction; 2000 } 2001 2002 static inline char *add_str(char *p, const char *str) 2003 { 2004 strcpy(p, str); 2005 return p + strlen(str); 2006 } 2007 2008 static struct txbit { 2009 unsigned flag; 2010 const char *name; 2011 int skip_for_len; 2012 } txbits[] = { 2013 { PERF_TXN_ELISION, "EL ", 0 }, 2014 { PERF_TXN_TRANSACTION, "TX ", 1 }, 2015 { PERF_TXN_SYNC, "SYNC ", 1 }, 2016 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 2017 { PERF_TXN_RETRY, "RETRY ", 0 }, 2018 { PERF_TXN_CONFLICT, "CON ", 0 }, 2019 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 2020 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 2021 { 0, NULL, 0 } 2022 }; 2023 2024 int hist_entry__transaction_len(void) 2025 { 2026 int i; 2027 int len = 0; 2028 2029 for (i = 0; txbits[i].name; i++) { 2030 if (!txbits[i].skip_for_len) 2031 len += strlen(txbits[i].name); 2032 } 2033 len += 4; /* :XX<space> */ 2034 return len; 2035 } 2036 2037 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 2038 size_t size, unsigned int width) 2039 { 2040 u64 t = he->transaction; 2041 char buf[128]; 2042 char *p = buf; 2043 int i; 2044 2045 buf[0] = 0; 2046 for (i = 0; txbits[i].name; i++) 2047 if (txbits[i].flag & t) 2048 p = add_str(p, txbits[i].name); 2049 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 2050 p = add_str(p, "NEITHER "); 2051 if (t & PERF_TXN_ABORT_MASK) { 2052 sprintf(p, ":%" PRIx64, 2053 (t & PERF_TXN_ABORT_MASK) >> 2054 PERF_TXN_ABORT_SHIFT); 2055 p += strlen(p); 2056 } 2057 2058 return repsep_snprintf(bf, size, "%-*s", width, buf); 2059 } 2060 2061 struct sort_entry sort_transaction = { 2062 .se_header = "Transaction ", 2063 .se_cmp = sort__transaction_cmp, 2064 .se_snprintf = hist_entry__transaction_snprintf, 2065 .se_width_idx = HISTC_TRANSACTION, 2066 }; 2067 2068 /* --sort symbol_size */ 2069 2070 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 2071 { 2072 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 2073 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 2074 2075 return size_l < size_r ? -1 : 2076 size_l == size_r ? 
0 : 1; 2077 } 2078 2079 static int64_t 2080 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 2081 { 2082 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 2083 } 2084 2085 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 2086 size_t bf_size, unsigned int width) 2087 { 2088 if (sym) 2089 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 2090 2091 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2092 } 2093 2094 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 2095 size_t size, unsigned int width) 2096 { 2097 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 2098 } 2099 2100 struct sort_entry sort_sym_size = { 2101 .se_header = "Symbol size", 2102 .se_cmp = sort__sym_size_cmp, 2103 .se_snprintf = hist_entry__sym_size_snprintf, 2104 .se_width_idx = HISTC_SYM_SIZE, 2105 }; 2106 2107 /* --sort dso_size */ 2108 2109 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 2110 { 2111 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 2112 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 2113 2114 return size_l < size_r ? -1 : 2115 size_l == size_r ? 0 : 1; 2116 } 2117 2118 static int64_t 2119 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 2120 { 2121 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 2122 } 2123 2124 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 2125 size_t bf_size, unsigned int width) 2126 { 2127 if (map && map__dso(map)) 2128 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map)); 2129 2130 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2131 } 2132 2133 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 2134 size_t size, unsigned int width) 2135 { 2136 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 2137 } 2138 2139 struct sort_entry sort_dso_size = { 2140 .se_header = "DSO size", 2141 .se_cmp = sort__dso_size_cmp, 2142 .se_snprintf = hist_entry__dso_size_snprintf, 2143 .se_width_idx = HISTC_DSO_SIZE, 2144 }; 2145 2146 /* --sort addr */ 2147 2148 static int64_t 2149 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right) 2150 { 2151 u64 left_ip = left->ip; 2152 u64 right_ip = right->ip; 2153 struct map *left_map = left->ms.map; 2154 struct map *right_map = right->ms.map; 2155 2156 if (left_map) 2157 left_ip = map__unmap_ip(left_map, left_ip); 2158 if (right_map) 2159 right_ip = map__unmap_ip(right_map, right_ip); 2160 2161 return _sort__addr_cmp(left_ip, right_ip); 2162 } 2163 2164 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf, 2165 size_t size, unsigned int width) 2166 { 2167 u64 ip = he->ip; 2168 struct map *map = he->ms.map; 2169 2170 if (map) 2171 ip = map__unmap_ip(map, ip); 2172 2173 return repsep_snprintf(bf, size, "%-#*llx", width, ip); 2174 } 2175 2176 struct sort_entry sort_addr = { 2177 .se_header = "Address", 2178 .se_cmp = sort__addr_cmp, 2179 .se_snprintf = hist_entry__addr_snprintf, 2180 .se_width_idx = HISTC_ADDR, 2181 }; 2182 2183 /* --sort type */ 2184 2185 struct annotated_data_type unknown_type = { 2186 .self = { 2187 .type_name = (char *)"(unknown)", 2188 .children = LIST_HEAD_INIT(unknown_type.self.children), 2189 }, 2190 }; 2191 2192 static int64_t 2193 sort__type_cmp(struct hist_entry *left, struct hist_entry *right) 2194 { 2195 return sort__addr_cmp(left, right); 2196 } 2197 2198 static void sort__type_init(struct hist_entry *he) 2199 { 2200 if 
(he->mem_type) 2201 return; 2202 2203 he->mem_type = hist_entry__get_data_type(he); 2204 if (he->mem_type == NULL) { 2205 he->mem_type = &unknown_type; 2206 he->mem_type_off = 0; 2207 } 2208 } 2209 2210 static int64_t 2211 sort__type_collapse(struct hist_entry *left, struct hist_entry *right) 2212 { 2213 struct annotated_data_type *left_type = left->mem_type; 2214 struct annotated_data_type *right_type = right->mem_type; 2215 2216 if (!left_type) { 2217 sort__type_init(left); 2218 left_type = left->mem_type; 2219 } 2220 2221 if (!right_type) { 2222 sort__type_init(right); 2223 right_type = right->mem_type; 2224 } 2225 2226 return strcmp(left_type->self.type_name, right_type->self.type_name); 2227 } 2228 2229 static int64_t 2230 sort__type_sort(struct hist_entry *left, struct hist_entry *right) 2231 { 2232 return sort__type_collapse(left, right); 2233 } 2234 2235 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf, 2236 size_t size, unsigned int width) 2237 { 2238 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name); 2239 } 2240 2241 struct sort_entry sort_type = { 2242 .se_header = "Data Type", 2243 .se_cmp = sort__type_cmp, 2244 .se_collapse = sort__type_collapse, 2245 .se_sort = sort__type_sort, 2246 .se_init = sort__type_init, 2247 .se_snprintf = hist_entry__type_snprintf, 2248 .se_width_idx = HISTC_TYPE, 2249 }; 2250 2251 /* --sort typeoff */ 2252 2253 static int64_t 2254 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right) 2255 { 2256 struct annotated_data_type *left_type = left->mem_type; 2257 struct annotated_data_type *right_type = right->mem_type; 2258 int64_t ret; 2259 2260 if (!left_type) { 2261 sort__type_init(left); 2262 left_type = left->mem_type; 2263 } 2264 2265 if (!right_type) { 2266 sort__type_init(right); 2267 right_type = right->mem_type; 2268 } 2269 2270 ret = strcmp(left_type->self.type_name, right_type->self.type_name); 2271 if (ret) 2272 return ret; 2273 return left->mem_type_off - right->mem_type_off; 2274 } 2275 2276 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m, 2277 int offset, bool first) 2278 { 2279 struct annotated_member *child; 2280 2281 if (list_empty(&m->children)) 2282 return; 2283 2284 list_for_each_entry(child, &m->children, node) { 2285 if (child->offset <= offset && offset < child->offset + child->size) { 2286 int len = 0; 2287 2288 /* It can have anonymous struct/union members */ 2289 if (child->var_name) { 2290 len = scnprintf(buf, sz, "%s%s", 2291 first ? 
"" : ".", child->var_name); 2292 first = false; 2293 } 2294 2295 fill_member_name(buf + len, sz - len, child, offset, first); 2296 return; 2297 } 2298 } 2299 } 2300 2301 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf, 2302 size_t size, unsigned int width __maybe_unused) 2303 { 2304 struct annotated_data_type *he_type = he->mem_type; 2305 char buf[4096]; 2306 2307 buf[0] = '\0'; 2308 if (list_empty(&he_type->self.children)) 2309 snprintf(buf, sizeof(buf), "no field"); 2310 else 2311 fill_member_name(buf, sizeof(buf), &he_type->self, 2312 he->mem_type_off, true); 2313 buf[4095] = '\0'; 2314 2315 return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name, 2316 he->mem_type_off, buf); 2317 } 2318 2319 struct sort_entry sort_type_offset = { 2320 .se_header = "Data Type Offset", 2321 .se_cmp = sort__type_cmp, 2322 .se_collapse = sort__typeoff_sort, 2323 .se_sort = sort__typeoff_sort, 2324 .se_init = sort__type_init, 2325 .se_snprintf = hist_entry__typeoff_snprintf, 2326 .se_width_idx = HISTC_TYPE_OFFSET, 2327 }; 2328 2329 2330 struct sort_dimension { 2331 const char *name; 2332 struct sort_entry *entry; 2333 int taken; 2334 }; 2335 2336 int __weak arch_support_sort_key(const char *sort_key __maybe_unused) 2337 { 2338 return 0; 2339 } 2340 2341 const char * __weak arch_perf_header_entry(const char *se_header) 2342 { 2343 return se_header; 2344 } 2345 2346 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd) 2347 { 2348 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header); 2349 } 2350 2351 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 2352 2353 static struct sort_dimension common_sort_dimensions[] = { 2354 DIM(SORT_PID, "pid", sort_thread), 2355 DIM(SORT_COMM, "comm", sort_comm), 2356 DIM(SORT_DSO, "dso", sort_dso), 2357 DIM(SORT_SYM, "symbol", sort_sym), 2358 DIM(SORT_PARENT, "parent", sort_parent), 2359 DIM(SORT_CPU, "cpu", sort_cpu), 2360 DIM(SORT_SOCKET, "socket", sort_socket), 2361 DIM(SORT_SRCLINE, "srcline", sort_srcline), 2362 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 2363 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 2364 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 2365 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 2366 #ifdef HAVE_LIBTRACEEVENT 2367 DIM(SORT_TRACE, "trace", sort_trace), 2368 #endif 2369 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 2370 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 2371 DIM(SORT_CGROUP, "cgroup", sort_cgroup), 2372 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 2373 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 2374 DIM(SORT_TIME, "time", sort_time), 2375 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size), 2376 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat), 2377 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat), 2378 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc), 2379 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc), 2380 DIM(SORT_ADDR, "addr", sort_addr), 2381 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc), 2382 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc), 2383 DIM(SORT_SIMD, "simd", sort_simd), 2384 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type), 2385 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset), 2386 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset), 2387 }; 2388 2389 #undef DIM 2390 2391 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, 
.entry = &(func) } 2392 2393 static struct sort_dimension bstack_sort_dimensions[] = { 2394 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 2395 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 2396 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 2397 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 2398 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 2399 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 2400 DIM(SORT_ABORT, "abort", sort_abort), 2401 DIM(SORT_CYCLES, "cycles", sort_cycles), 2402 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 2403 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 2404 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 2405 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from), 2406 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to), 2407 }; 2408 2409 #undef DIM 2410 2411 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 2412 2413 static struct sort_dimension memory_sort_dimensions[] = { 2414 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 2415 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 2416 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 2417 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 2418 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 2419 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 2420 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 2421 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 2422 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 2423 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size), 2424 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked), 2425 }; 2426 2427 #undef DIM 2428 2429 struct hpp_dimension { 2430 const char *name; 2431 struct perf_hpp_fmt *fmt; 2432 int taken; 2433 }; 2434 2435 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 2436 2437 static struct hpp_dimension hpp_sort_dimensions[] = { 2438 DIM(PERF_HPP__OVERHEAD, "overhead"), 2439 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 2440 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 2441 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 2442 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 2443 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 2444 DIM(PERF_HPP__SAMPLES, "sample"), 2445 DIM(PERF_HPP__PERIOD, "period"), 2446 DIM(PERF_HPP__WEIGHT1, "weight1"), 2447 DIM(PERF_HPP__WEIGHT2, "weight2"), 2448 DIM(PERF_HPP__WEIGHT3, "weight3"), 2449 /* aliases for weight_struct */ 2450 DIM(PERF_HPP__WEIGHT2, "ins_lat"), 2451 DIM(PERF_HPP__WEIGHT3, "retire_lat"), 2452 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"), 2453 }; 2454 2455 #undef DIM 2456 2457 struct hpp_sort_entry { 2458 struct perf_hpp_fmt hpp; 2459 struct sort_entry *se; 2460 }; 2461 2462 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 2463 { 2464 struct hpp_sort_entry *hse; 2465 2466 if (!perf_hpp__is_sort_entry(fmt)) 2467 return; 2468 2469 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2470 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 2471 } 2472 2473 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2474 struct hists *hists, int line __maybe_unused, 2475 int *span __maybe_unused) 2476 { 2477 struct hpp_sort_entry *hse; 2478 size_t len = fmt->user_len; 2479 2480 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2481 2482 if (!len) 2483 len = hists__col_len(hists, hse->se->se_width_idx); 2484 2485 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 2486 } 2487 2488 static int __sort__hpp_width(struct perf_hpp_fmt 
*fmt, 2489 struct perf_hpp *hpp __maybe_unused, 2490 struct hists *hists) 2491 { 2492 struct hpp_sort_entry *hse; 2493 size_t len = fmt->user_len; 2494 2495 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2496 2497 if (!len) 2498 len = hists__col_len(hists, hse->se->se_width_idx); 2499 2500 return len; 2501 } 2502 2503 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2504 struct hist_entry *he) 2505 { 2506 struct hpp_sort_entry *hse; 2507 size_t len = fmt->user_len; 2508 2509 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2510 2511 if (!len) 2512 len = hists__col_len(he->hists, hse->se->se_width_idx); 2513 2514 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 2515 } 2516 2517 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 2518 struct hist_entry *a, struct hist_entry *b) 2519 { 2520 struct hpp_sort_entry *hse; 2521 2522 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2523 return hse->se->se_cmp(a, b); 2524 } 2525 2526 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 2527 struct hist_entry *a, struct hist_entry *b) 2528 { 2529 struct hpp_sort_entry *hse; 2530 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 2531 2532 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2533 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 2534 return collapse_fn(a, b); 2535 } 2536 2537 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 2538 struct hist_entry *a, struct hist_entry *b) 2539 { 2540 struct hpp_sort_entry *hse; 2541 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 2542 2543 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2544 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 2545 return sort_fn(a, b); 2546 } 2547 2548 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 2549 { 2550 return format->header == __sort__hpp_header; 2551 } 2552 2553 #define MK_SORT_ENTRY_CHK(key) \ 2554 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 2555 { \ 2556 struct hpp_sort_entry *hse; \ 2557 \ 2558 if (!perf_hpp__is_sort_entry(fmt)) \ 2559 return false; \ 2560 \ 2561 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 2562 return hse->se == &sort_ ## key ; \ 2563 } 2564 2565 #ifdef HAVE_LIBTRACEEVENT 2566 MK_SORT_ENTRY_CHK(trace) 2567 #else 2568 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2569 { 2570 return false; 2571 } 2572 #endif 2573 MK_SORT_ENTRY_CHK(srcline) 2574 MK_SORT_ENTRY_CHK(srcfile) 2575 MK_SORT_ENTRY_CHK(thread) 2576 MK_SORT_ENTRY_CHK(comm) 2577 MK_SORT_ENTRY_CHK(dso) 2578 MK_SORT_ENTRY_CHK(sym) 2579 2580 2581 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2582 { 2583 struct hpp_sort_entry *hse_a; 2584 struct hpp_sort_entry *hse_b; 2585 2586 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 2587 return false; 2588 2589 hse_a = container_of(a, struct hpp_sort_entry, hpp); 2590 hse_b = container_of(b, struct hpp_sort_entry, hpp); 2591 2592 return hse_a->se == hse_b->se; 2593 } 2594 2595 static void hse_free(struct perf_hpp_fmt *fmt) 2596 { 2597 struct hpp_sort_entry *hse; 2598 2599 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2600 free(hse); 2601 } 2602 2603 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2604 { 2605 struct hpp_sort_entry *hse; 2606 2607 if (!perf_hpp__is_sort_entry(fmt)) 2608 return; 2609 2610 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2611 2612 if (hse->se->se_init) 2613 hse->se->se_init(he); 2614 } 2615 2616 static struct 
hpp_sort_entry * 2617 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 2618 { 2619 struct hpp_sort_entry *hse; 2620 2621 hse = malloc(sizeof(*hse)); 2622 if (hse == NULL) { 2623 pr_err("Memory allocation failed\n"); 2624 return NULL; 2625 } 2626 2627 hse->se = sd->entry; 2628 hse->hpp.name = sd->entry->se_header; 2629 hse->hpp.header = __sort__hpp_header; 2630 hse->hpp.width = __sort__hpp_width; 2631 hse->hpp.entry = __sort__hpp_entry; 2632 hse->hpp.color = NULL; 2633 2634 hse->hpp.cmp = __sort__hpp_cmp; 2635 hse->hpp.collapse = __sort__hpp_collapse; 2636 hse->hpp.sort = __sort__hpp_sort; 2637 hse->hpp.equal = __sort__hpp_equal; 2638 hse->hpp.free = hse_free; 2639 hse->hpp.init = hse_init; 2640 2641 INIT_LIST_HEAD(&hse->hpp.list); 2642 INIT_LIST_HEAD(&hse->hpp.sort_list); 2643 hse->hpp.elide = false; 2644 hse->hpp.len = 0; 2645 hse->hpp.user_len = 0; 2646 hse->hpp.level = level; 2647 2648 return hse; 2649 } 2650 2651 static void hpp_free(struct perf_hpp_fmt *fmt) 2652 { 2653 free(fmt); 2654 } 2655 2656 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 2657 int level) 2658 { 2659 struct perf_hpp_fmt *fmt; 2660 2661 fmt = memdup(hd->fmt, sizeof(*fmt)); 2662 if (fmt) { 2663 INIT_LIST_HEAD(&fmt->list); 2664 INIT_LIST_HEAD(&fmt->sort_list); 2665 fmt->free = hpp_free; 2666 fmt->level = level; 2667 } 2668 2669 return fmt; 2670 } 2671 2672 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 2673 { 2674 struct perf_hpp_fmt *fmt; 2675 struct hpp_sort_entry *hse; 2676 int ret = -1; 2677 int r; 2678 2679 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 2680 if (!perf_hpp__is_sort_entry(fmt)) 2681 continue; 2682 2683 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2684 if (hse->se->se_filter == NULL) 2685 continue; 2686 2687 /* 2688 * A hist entry is filtered if any of the sort keys in the hpp list 2689 * applies a filter, but non-matching filter types are skipped.
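 *
 * e.g. a thread filter is only answered by the thread sort entry;
 * entries for other keys return -1 for that filter type and are
 * ignored here, so the entry is only filtered out when at least one
 * key of the matching type asks for it.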
2690 */ 2691 r = hse->se->se_filter(he, type, arg); 2692 if (r >= 0) { 2693 if (ret < 0) 2694 ret = 0; 2695 ret |= r; 2696 } 2697 } 2698 2699 return ret; 2700 } 2701 2702 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 2703 struct perf_hpp_list *list, 2704 int level) 2705 { 2706 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 2707 2708 if (hse == NULL) 2709 return -1; 2710 2711 perf_hpp_list__register_sort_field(list, &hse->hpp); 2712 return 0; 2713 } 2714 2715 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 2716 struct perf_hpp_list *list) 2717 { 2718 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 2719 2720 if (hse == NULL) 2721 return -1; 2722 2723 perf_hpp_list__column_register(list, &hse->hpp); 2724 return 0; 2725 } 2726 2727 #ifndef HAVE_LIBTRACEEVENT 2728 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2729 { 2730 return false; 2731 } 2732 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused, 2733 struct hists *hists __maybe_unused) 2734 { 2735 return false; 2736 } 2737 #else 2738 struct hpp_dynamic_entry { 2739 struct perf_hpp_fmt hpp; 2740 struct evsel *evsel; 2741 struct tep_format_field *field; 2742 unsigned dynamic_len; 2743 bool raw_trace; 2744 }; 2745 2746 static int hde_width(struct hpp_dynamic_entry *hde) 2747 { 2748 if (!hde->hpp.len) { 2749 int len = hde->dynamic_len; 2750 int namelen = strlen(hde->field->name); 2751 int fieldlen = hde->field->size; 2752 2753 if (namelen > len) 2754 len = namelen; 2755 2756 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2757 /* length for print hex numbers */ 2758 fieldlen = hde->field->size * 2 + 2; 2759 } 2760 if (fieldlen > len) 2761 len = fieldlen; 2762 2763 hde->hpp.len = len; 2764 } 2765 return hde->hpp.len; 2766 } 2767 2768 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2769 struct hist_entry *he) 2770 { 2771 char *str, *pos; 2772 struct tep_format_field *field = hde->field; 2773 size_t namelen; 2774 bool last = false; 2775 2776 if (hde->raw_trace) 2777 return; 2778 2779 /* parse pretty print result and update max length */ 2780 if (!he->trace_output) 2781 he->trace_output = get_trace_output(he); 2782 2783 namelen = strlen(field->name); 2784 str = he->trace_output; 2785 2786 while (str) { 2787 pos = strchr(str, ' '); 2788 if (pos == NULL) { 2789 last = true; 2790 pos = str + strlen(str); 2791 } 2792 2793 if (!strncmp(str, field->name, namelen)) { 2794 size_t len; 2795 2796 str += namelen + 1; 2797 len = pos - str; 2798 2799 if (len > hde->dynamic_len) 2800 hde->dynamic_len = len; 2801 break; 2802 } 2803 2804 if (last) 2805 str = NULL; 2806 else 2807 str = pos + 1; 2808 } 2809 } 2810 2811 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2812 struct hists *hists __maybe_unused, 2813 int line __maybe_unused, 2814 int *span __maybe_unused) 2815 { 2816 struct hpp_dynamic_entry *hde; 2817 size_t len = fmt->user_len; 2818 2819 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2820 2821 if (!len) 2822 len = hde_width(hde); 2823 2824 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2825 } 2826 2827 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2828 struct perf_hpp *hpp __maybe_unused, 2829 struct hists *hists __maybe_unused) 2830 { 2831 struct hpp_dynamic_entry *hde; 2832 size_t len = fmt->user_len; 2833 2834 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2835 2836 if (!len) 2837 len = hde_width(hde); 2838 2839 
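	/*
	 * Descriptive note: hde_width() picks the widest of the field
	 * name, the raw field size (two hex characters per byte plus
	 * "0x" for non-string fields) and the longest pretty-printed
	 * value seen so far (dynamic_len).
	 */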
return len; 2840 } 2841 2842 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2843 { 2844 struct hpp_dynamic_entry *hde; 2845 2846 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2847 2848 return hists_to_evsel(hists) == hde->evsel; 2849 } 2850 2851 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2852 struct hist_entry *he) 2853 { 2854 struct hpp_dynamic_entry *hde; 2855 size_t len = fmt->user_len; 2856 char *str, *pos; 2857 struct tep_format_field *field; 2858 size_t namelen; 2859 bool last = false; 2860 int ret; 2861 2862 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2863 2864 if (!len) 2865 len = hde_width(hde); 2866 2867 if (hde->raw_trace) 2868 goto raw_field; 2869 2870 if (!he->trace_output) 2871 he->trace_output = get_trace_output(he); 2872 2873 field = hde->field; 2874 namelen = strlen(field->name); 2875 str = he->trace_output; 2876 2877 while (str) { 2878 pos = strchr(str, ' '); 2879 if (pos == NULL) { 2880 last = true; 2881 pos = str + strlen(str); 2882 } 2883 2884 if (!strncmp(str, field->name, namelen)) { 2885 str += namelen + 1; 2886 str = strndup(str, pos - str); 2887 2888 if (str == NULL) 2889 return scnprintf(hpp->buf, hpp->size, 2890 "%*.*s", len, len, "ERROR"); 2891 break; 2892 } 2893 2894 if (last) 2895 str = NULL; 2896 else 2897 str = pos + 1; 2898 } 2899 2900 if (str == NULL) { 2901 struct trace_seq seq; 2902 raw_field: 2903 trace_seq_init(&seq); 2904 tep_print_field(&seq, he->raw_data, hde->field); 2905 str = seq.buffer; 2906 } 2907 2908 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2909 free(str); 2910 return ret; 2911 } 2912 2913 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2914 struct hist_entry *a, struct hist_entry *b) 2915 { 2916 struct hpp_dynamic_entry *hde; 2917 struct tep_format_field *field; 2918 unsigned offset, size; 2919 2920 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2921 2922 field = hde->field; 2923 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2924 unsigned long long dyn; 2925 2926 tep_read_number_field(field, a->raw_data, &dyn); 2927 offset = dyn & 0xffff; 2928 size = (dyn >> 16) & 0xffff; 2929 if (tep_field_is_relative(field->flags)) 2930 offset += field->offset + field->size; 2931 /* record max width for output */ 2932 if (size > hde->dynamic_len) 2933 hde->dynamic_len = size; 2934 } else { 2935 offset = field->offset; 2936 size = field->size; 2937 } 2938 2939 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2940 } 2941 2942 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2943 { 2944 return fmt->cmp == __sort__hde_cmp; 2945 } 2946 2947 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2948 { 2949 struct hpp_dynamic_entry *hde_a; 2950 struct hpp_dynamic_entry *hde_b; 2951 2952 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2953 return false; 2954 2955 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2956 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2957 2958 return hde_a->field == hde_b->field; 2959 } 2960 2961 static void hde_free(struct perf_hpp_fmt *fmt) 2962 { 2963 struct hpp_dynamic_entry *hde; 2964 2965 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2966 free(hde); 2967 } 2968 2969 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2970 { 2971 struct hpp_dynamic_entry *hde; 2972 2973 if (!perf_hpp__is_dynamic_entry(fmt)) 2974 return; 2975 2976 hde = container_of(fmt, struct hpp_dynamic_entry, 
hpp); 2977 update_dynamic_len(hde, he); 2978 } 2979 2980 static struct hpp_dynamic_entry * 2981 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2982 int level) 2983 { 2984 struct hpp_dynamic_entry *hde; 2985 2986 hde = malloc(sizeof(*hde)); 2987 if (hde == NULL) { 2988 pr_debug("Memory allocation failed\n"); 2989 return NULL; 2990 } 2991 2992 hde->evsel = evsel; 2993 hde->field = field; 2994 hde->dynamic_len = 0; 2995 2996 hde->hpp.name = field->name; 2997 hde->hpp.header = __sort__hde_header; 2998 hde->hpp.width = __sort__hde_width; 2999 hde->hpp.entry = __sort__hde_entry; 3000 hde->hpp.color = NULL; 3001 3002 hde->hpp.init = __sort__hde_init; 3003 hde->hpp.cmp = __sort__hde_cmp; 3004 hde->hpp.collapse = __sort__hde_cmp; 3005 hde->hpp.sort = __sort__hde_cmp; 3006 hde->hpp.equal = __sort__hde_equal; 3007 hde->hpp.free = hde_free; 3008 3009 INIT_LIST_HEAD(&hde->hpp.list); 3010 INIT_LIST_HEAD(&hde->hpp.sort_list); 3011 hde->hpp.elide = false; 3012 hde->hpp.len = 0; 3013 hde->hpp.user_len = 0; 3014 hde->hpp.level = level; 3015 3016 return hde; 3017 } 3018 #endif /* HAVE_LIBTRACEEVENT */ 3019 3020 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 3021 { 3022 struct perf_hpp_fmt *new_fmt = NULL; 3023 3024 if (perf_hpp__is_sort_entry(fmt)) { 3025 struct hpp_sort_entry *hse, *new_hse; 3026 3027 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3028 new_hse = memdup(hse, sizeof(*hse)); 3029 if (new_hse) 3030 new_fmt = &new_hse->hpp; 3031 #ifdef HAVE_LIBTRACEEVENT 3032 } else if (perf_hpp__is_dynamic_entry(fmt)) { 3033 struct hpp_dynamic_entry *hde, *new_hde; 3034 3035 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 3036 new_hde = memdup(hde, sizeof(*hde)); 3037 if (new_hde) 3038 new_fmt = &new_hde->hpp; 3039 #endif 3040 } else { 3041 new_fmt = memdup(fmt, sizeof(*fmt)); 3042 } 3043 3044 INIT_LIST_HEAD(&new_fmt->list); 3045 INIT_LIST_HEAD(&new_fmt->sort_list); 3046 3047 return new_fmt; 3048 } 3049 3050 static int parse_field_name(char *str, char **event, char **field, char **opt) 3051 { 3052 char *event_name, *field_name, *opt_name; 3053 3054 event_name = str; 3055 field_name = strchr(str, '.'); 3056 3057 if (field_name) { 3058 *field_name++ = '\0'; 3059 } else { 3060 event_name = NULL; 3061 field_name = str; 3062 } 3063 3064 opt_name = strchr(field_name, '/'); 3065 if (opt_name) 3066 *opt_name++ = '\0'; 3067 3068 *event = event_name; 3069 *field = field_name; 3070 *opt = opt_name; 3071 3072 return 0; 3073 } 3074 3075 /* find match evsel using a given event name. The event name can be: 3076 * 1. '%' + event index (e.g. '%1' for first event) 3077 * 2. full event name (e.g. sched:sched_switch) 3078 * 3. 
partial event name (should not contain ':') 3079 */ 3080 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 3081 { 3082 struct evsel *evsel = NULL; 3083 struct evsel *pos; 3084 bool full_name; 3085 3086 /* case 1 */ 3087 if (event_name[0] == '%') { 3088 int nr = strtol(event_name+1, NULL, 0); 3089 3090 if (nr > evlist->core.nr_entries) 3091 return NULL; 3092 3093 evsel = evlist__first(evlist); 3094 while (--nr > 0) 3095 evsel = evsel__next(evsel); 3096 3097 return evsel; 3098 } 3099 3100 full_name = !!strchr(event_name, ':'); 3101 evlist__for_each_entry(evlist, pos) { 3102 /* case 2 */ 3103 if (full_name && evsel__name_is(pos, event_name)) 3104 return pos; 3105 /* case 3 */ 3106 if (!full_name && strstr(pos->name, event_name)) { 3107 if (evsel) { 3108 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 3109 event_name, evsel->name, pos->name); 3110 return NULL; 3111 } 3112 evsel = pos; 3113 } 3114 } 3115 3116 return evsel; 3117 } 3118 3119 #ifdef HAVE_LIBTRACEEVENT 3120 static int __dynamic_dimension__add(struct evsel *evsel, 3121 struct tep_format_field *field, 3122 bool raw_trace, int level) 3123 { 3124 struct hpp_dynamic_entry *hde; 3125 3126 hde = __alloc_dynamic_entry(evsel, field, level); 3127 if (hde == NULL) 3128 return -ENOMEM; 3129 3130 hde->raw_trace = raw_trace; 3131 3132 perf_hpp__register_sort_field(&hde->hpp); 3133 return 0; 3134 } 3135 3136 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 3137 { 3138 int ret; 3139 struct tep_format_field *field; 3140 3141 field = evsel->tp_format->format.fields; 3142 while (field) { 3143 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3144 if (ret < 0) 3145 return ret; 3146 3147 field = field->next; 3148 } 3149 return 0; 3150 } 3151 3152 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 3153 int level) 3154 { 3155 int ret; 3156 struct evsel *evsel; 3157 3158 evlist__for_each_entry(evlist, evsel) { 3159 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3160 continue; 3161 3162 ret = add_evsel_fields(evsel, raw_trace, level); 3163 if (ret < 0) 3164 return ret; 3165 } 3166 return 0; 3167 } 3168 3169 static int add_all_matching_fields(struct evlist *evlist, 3170 char *field_name, bool raw_trace, int level) 3171 { 3172 int ret = -ESRCH; 3173 struct evsel *evsel; 3174 struct tep_format_field *field; 3175 3176 evlist__for_each_entry(evlist, evsel) { 3177 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3178 continue; 3179 3180 field = tep_find_any_field(evsel->tp_format, field_name); 3181 if (field == NULL) 3182 continue; 3183 3184 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3185 if (ret < 0) 3186 break; 3187 } 3188 return ret; 3189 } 3190 #endif /* HAVE_LIBTRACEEVENT */ 3191 3192 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 3193 int level) 3194 { 3195 char *str, *event_name, *field_name, *opt_name; 3196 struct evsel *evsel; 3197 bool raw_trace = symbol_conf.raw_trace; 3198 int ret = 0; 3199 3200 if (evlist == NULL) 3201 return -ENOENT; 3202 3203 str = strdup(tok); 3204 if (str == NULL) 3205 return -ENOMEM; 3206 3207 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 3208 ret = -EINVAL; 3209 goto out; 3210 } 3211 3212 if (opt_name) { 3213 if (strcmp(opt_name, "raw")) { 3214 pr_debug("unsupported field option %s\n", opt_name); 3215 ret = -EINVAL; 3216 goto out; 3217 } 3218 raw_trace = true; 3219 } 3220 3221 #ifdef HAVE_LIBTRACEEVENT 3222 if (!strcmp(field_name, "trace_fields")) { 3223 ret = 
add_all_dynamic_fields(evlist, raw_trace, level); 3224 goto out; 3225 } 3226 3227 if (event_name == NULL) { 3228 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 3229 goto out; 3230 } 3231 #else 3232 evlist__for_each_entry(evlist, evsel) { 3233 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 3234 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel)); 3235 ret = -ENOTSUP; 3236 } 3237 } 3238 3239 if (ret) { 3240 pr_err("\n"); 3241 goto out; 3242 } 3243 #endif 3244 3245 evsel = find_evsel(evlist, event_name); 3246 if (evsel == NULL) { 3247 pr_debug("Cannot find event: %s\n", event_name); 3248 ret = -ENOENT; 3249 goto out; 3250 } 3251 3252 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3253 pr_debug("%s is not a tracepoint event\n", event_name); 3254 ret = -EINVAL; 3255 goto out; 3256 } 3257 3258 #ifdef HAVE_LIBTRACEEVENT 3259 if (!strcmp(field_name, "*")) { 3260 ret = add_evsel_fields(evsel, raw_trace, level); 3261 } else { 3262 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name); 3263 3264 if (field == NULL) { 3265 pr_debug("Cannot find event field for %s.%s\n", 3266 event_name, field_name); 3267 return -ENOENT; 3268 } 3269 3270 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3271 } 3272 #else 3273 (void)level; 3274 (void)raw_trace; 3275 #endif /* HAVE_LIBTRACEEVENT */ 3276 3277 out: 3278 free(str); 3279 return ret; 3280 } 3281 3282 static int __sort_dimension__add(struct sort_dimension *sd, 3283 struct perf_hpp_list *list, 3284 int level) 3285 { 3286 if (sd->taken) 3287 return 0; 3288 3289 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 3290 return -1; 3291 3292 if (sd->entry->se_collapse) 3293 list->need_collapse = 1; 3294 3295 sd->taken = 1; 3296 3297 return 0; 3298 } 3299 3300 static int __hpp_dimension__add(struct hpp_dimension *hd, 3301 struct perf_hpp_list *list, 3302 int level) 3303 { 3304 struct perf_hpp_fmt *fmt; 3305 3306 if (hd->taken) 3307 return 0; 3308 3309 fmt = __hpp_dimension__alloc_hpp(hd, level); 3310 if (!fmt) 3311 return -1; 3312 3313 hd->taken = 1; 3314 perf_hpp_list__register_sort_field(list, fmt); 3315 return 0; 3316 } 3317 3318 static int __sort_dimension__add_output(struct perf_hpp_list *list, 3319 struct sort_dimension *sd) 3320 { 3321 if (sd->taken) 3322 return 0; 3323 3324 if (__sort_dimension__add_hpp_output(sd, list) < 0) 3325 return -1; 3326 3327 sd->taken = 1; 3328 return 0; 3329 } 3330 3331 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 3332 struct hpp_dimension *hd) 3333 { 3334 struct perf_hpp_fmt *fmt; 3335 3336 if (hd->taken) 3337 return 0; 3338 3339 fmt = __hpp_dimension__alloc_hpp(hd, 0); 3340 if (!fmt) 3341 return -1; 3342 3343 hd->taken = 1; 3344 perf_hpp_list__column_register(list, fmt); 3345 return 0; 3346 } 3347 3348 int hpp_dimension__add_output(unsigned col) 3349 { 3350 BUG_ON(col >= PERF_HPP__MAX_INDEX); 3351 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 3352 } 3353 3354 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 3355 struct evlist *evlist, 3356 int level) 3357 { 3358 unsigned int i, j; 3359 3360 /* 3361 * Check to see if there are any arch specific 3362 * sort dimensions not applicable for the current 3363 * architecture. If so, Skip that sort key since 3364 * we don't want to display it in the output fields. 
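 *
 * e.g. the pipeline stage cycle keys ("p_stage_cyc" and
 * "local_p_stage_cyc") are accepted by arch_support_sort_key() only
 * on architectures that override it (powerpc, for instance);
 * elsewhere the key is silently dropped by the "return 0" below.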
3365 */ 3366 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) { 3367 if (!strcmp(arch_specific_sort_keys[j], tok) && 3368 !arch_support_sort_key(tok)) { 3369 return 0; 3370 } 3371 } 3372 3373 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3374 struct sort_dimension *sd = &common_sort_dimensions[i]; 3375 3376 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3377 continue; 3378 3379 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) { 3380 if (sd->name && !strcmp(dynamic_headers[j], sd->name)) 3381 sort_dimension_add_dynamic_header(sd); 3382 } 3383 3384 if (sd->entry == &sort_parent && parent_pattern) { 3385 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 3386 if (ret) { 3387 char err[BUFSIZ]; 3388 3389 regerror(ret, &parent_regex, err, sizeof(err)); 3390 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 3391 return -EINVAL; 3392 } 3393 list->parent = 1; 3394 } else if (sd->entry == &sort_sym) { 3395 list->sym = 1; 3396 /* 3397 * perf diff displays the performance difference amongst 3398 * two or more perf.data files. Those files could come 3399 * from different binaries. So we should not compare 3400 * their ips, but the name of symbol. 3401 */ 3402 if (sort__mode == SORT_MODE__DIFF) 3403 sd->entry->se_collapse = sort__sym_sort; 3404 3405 } else if (sd->entry == &sort_dso) { 3406 list->dso = 1; 3407 } else if (sd->entry == &sort_socket) { 3408 list->socket = 1; 3409 } else if (sd->entry == &sort_thread) { 3410 list->thread = 1; 3411 } else if (sd->entry == &sort_comm) { 3412 list->comm = 1; 3413 } else if (sd->entry == &sort_type_offset) { 3414 symbol_conf.annotate_data_member = true; 3415 } 3416 3417 return __sort_dimension__add(sd, list, level); 3418 } 3419 3420 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3421 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3422 3423 if (strncasecmp(tok, hd->name, strlen(tok))) 3424 continue; 3425 3426 return __hpp_dimension__add(hd, list, level); 3427 } 3428 3429 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3430 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3431 3432 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3433 continue; 3434 3435 if (sort__mode != SORT_MODE__BRANCH) 3436 return -EINVAL; 3437 3438 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 3439 list->sym = 1; 3440 3441 __sort_dimension__add(sd, list, level); 3442 return 0; 3443 } 3444 3445 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3446 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3447 3448 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3449 continue; 3450 3451 if (sort__mode != SORT_MODE__MEMORY) 3452 return -EINVAL; 3453 3454 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 3455 return -EINVAL; 3456 3457 if (sd->entry == &sort_mem_daddr_sym) 3458 list->sym = 1; 3459 3460 __sort_dimension__add(sd, list, level); 3461 return 0; 3462 } 3463 3464 if (!add_dynamic_entry(evlist, tok, level)) 3465 return 0; 3466 3467 return -ESRCH; 3468 } 3469 3470 static int setup_sort_list(struct perf_hpp_list *list, char *str, 3471 struct evlist *evlist) 3472 { 3473 char *tmp, *tok; 3474 int ret = 0; 3475 int level = 0; 3476 int next_level = 1; 3477 bool in_group = false; 3478 3479 do { 3480 tok = str; 3481 tmp = strpbrk(str, "{}, "); 3482 if (tmp) { 3483 if (in_group) 3484 next_level = level; 3485 else 3486 next_level = level + 1; 3487 3488 if (*tmp == '{') 3489 in_group = true; 3490 else if (*tmp == '}') 3491 in_group = false; 3492 3493 *tmp = 
'\0'; 3494 str = tmp + 1; 3495 } 3496 3497 if (*tok) { 3498 ret = sort_dimension__add(list, tok, evlist, level); 3499 if (ret == -EINVAL) { 3500 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 3501 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 3502 else 3503 ui__error("Invalid --sort key: `%s'", tok); 3504 break; 3505 } else if (ret == -ESRCH) { 3506 ui__error("Unknown --sort key: `%s'", tok); 3507 break; 3508 } 3509 } 3510 3511 level = next_level; 3512 } while (tmp); 3513 3514 return ret; 3515 } 3516 3517 static const char *get_default_sort_order(struct evlist *evlist) 3518 { 3519 const char *default_sort_orders[] = { 3520 default_sort_order, 3521 default_branch_sort_order, 3522 default_mem_sort_order, 3523 default_top_sort_order, 3524 default_diff_sort_order, 3525 default_tracepoint_sort_order, 3526 }; 3527 bool use_trace = true; 3528 struct evsel *evsel; 3529 3530 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 3531 3532 if (evlist == NULL || evlist__empty(evlist)) 3533 goto out_no_evlist; 3534 3535 evlist__for_each_entry(evlist, evsel) { 3536 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3537 use_trace = false; 3538 break; 3539 } 3540 } 3541 3542 if (use_trace) { 3543 sort__mode = SORT_MODE__TRACEPOINT; 3544 if (symbol_conf.raw_trace) 3545 return "trace_fields"; 3546 } 3547 out_no_evlist: 3548 return default_sort_orders[sort__mode]; 3549 } 3550 3551 static int setup_sort_order(struct evlist *evlist) 3552 { 3553 char *new_sort_order; 3554 3555 /* 3556 * Append '+'-prefixed sort order to the default sort 3557 * order string. 3558 */ 3559 if (!sort_order || is_strict_order(sort_order)) 3560 return 0; 3561 3562 if (sort_order[1] == '\0') { 3563 ui__error("Invalid --sort key: `+'"); 3564 return -EINVAL; 3565 } 3566 3567 /* 3568 * We allocate a new sort_order string, but we never free it, 3569 * because it's referenced throughout the rest of the code. 3570 */ 3571 if (asprintf(&new_sort_order, "%s,%s", 3572 get_default_sort_order(evlist), sort_order + 1) < 0) { 3573 pr_err("Not enough memory to set up --sort"); 3574 return -ENOMEM; 3575 } 3576 3577 sort_order = new_sort_order; 3578 return 0; 3579 } 3580 3581 /* 3582 * Adds a 'pre,' prefix to 'str' if 'pre' is 3583 * not already part of 'str'. 3584 */ 3585 static char *prefix_if_not_in(const char *pre, char *str) 3586 { 3587 char *n; 3588 3589 if (!str || strstr(str, pre)) 3590 return str; 3591 3592 if (asprintf(&n, "%s,%s", pre, str) < 0) 3593 n = NULL; 3594 3595 free(str); 3596 return n; 3597 } 3598 3599 static char *setup_overhead(char *keys) 3600 { 3601 if (sort__mode == SORT_MODE__DIFF) 3602 return keys; 3603 3604 keys = prefix_if_not_in("overhead", keys); 3605 3606 if (symbol_conf.cumulate_callchain) 3607 keys = prefix_if_not_in("overhead_children", keys); 3608 3609 return keys; 3610 } 3611 3612 static int __setup_sorting(struct evlist *evlist) 3613 { 3614 char *str; 3615 const char *sort_keys; 3616 int ret = 0; 3617 3618 ret = setup_sort_order(evlist); 3619 if (ret) 3620 return ret; 3621 3622 sort_keys = sort_order; 3623 if (sort_keys == NULL) { 3624 if (is_strict_order(field_order)) { 3625 /* 3626 * If the user specified a field order but no sort order, 3627 * we'll honor it and not add the default sort orders.
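 *
 * e.g. "perf report -F overhead,comm" with no -s shows exactly those
 * two columns, while "-F +dso" keeps the default sort keys and adds a
 * dso column as well.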
3628 */ 3629 return 0; 3630 } 3631 3632 sort_keys = get_default_sort_order(evlist); 3633 } 3634 3635 str = strdup(sort_keys); 3636 if (str == NULL) { 3637 pr_err("Not enough memory to setup sort keys"); 3638 return -ENOMEM; 3639 } 3640 3641 /* 3642 * Prepend overhead fields for backward compatibility. 3643 */ 3644 if (!is_strict_order(field_order)) { 3645 str = setup_overhead(str); 3646 if (str == NULL) { 3647 pr_err("Not enough memory to setup overhead keys"); 3648 return -ENOMEM; 3649 } 3650 } 3651 3652 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3653 3654 free(str); 3655 return ret; 3656 } 3657 3658 void perf_hpp__set_elide(int idx, bool elide) 3659 { 3660 struct perf_hpp_fmt *fmt; 3661 struct hpp_sort_entry *hse; 3662 3663 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3664 if (!perf_hpp__is_sort_entry(fmt)) 3665 continue; 3666 3667 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3668 if (hse->se->se_width_idx == idx) { 3669 fmt->elide = elide; 3670 break; 3671 } 3672 } 3673 } 3674 3675 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3676 { 3677 if (list && strlist__nr_entries(list) == 1) { 3678 if (fp != NULL) 3679 fprintf(fp, "# %s: %s\n", list_name, 3680 strlist__entry(list, 0)->s); 3681 return true; 3682 } 3683 return false; 3684 } 3685 3686 static bool get_elide(int idx, FILE *output) 3687 { 3688 switch (idx) { 3689 case HISTC_SYMBOL: 3690 return __get_elide(symbol_conf.sym_list, "symbol", output); 3691 case HISTC_DSO: 3692 return __get_elide(symbol_conf.dso_list, "dso", output); 3693 case HISTC_COMM: 3694 return __get_elide(symbol_conf.comm_list, "comm", output); 3695 default: 3696 break; 3697 } 3698 3699 if (sort__mode != SORT_MODE__BRANCH) 3700 return false; 3701 3702 switch (idx) { 3703 case HISTC_SYMBOL_FROM: 3704 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3705 case HISTC_SYMBOL_TO: 3706 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3707 case HISTC_DSO_FROM: 3708 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3709 case HISTC_DSO_TO: 3710 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3711 case HISTC_ADDR_FROM: 3712 return __get_elide(symbol_conf.sym_from_list, "addr_from", output); 3713 case HISTC_ADDR_TO: 3714 return __get_elide(symbol_conf.sym_to_list, "addr_to", output); 3715 default: 3716 break; 3717 } 3718 3719 return false; 3720 } 3721 3722 void sort__setup_elide(FILE *output) 3723 { 3724 struct perf_hpp_fmt *fmt; 3725 struct hpp_sort_entry *hse; 3726 3727 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3728 if (!perf_hpp__is_sort_entry(fmt)) 3729 continue; 3730 3731 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3732 fmt->elide = get_elide(hse->se->se_width_idx, output); 3733 } 3734 3735 /* 3736 * It makes no sense to elide all of the sort entries. 3737 * Just revert them so they show up again.
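 *
 * e.g. "perf report --dsos libfoo.so -s dso" would otherwise end up
 * with its only column elided; the loops below notice that every sort
 * entry is elided and turn eliding back off.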
3738 */ 3739 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3740 if (!perf_hpp__is_sort_entry(fmt)) 3741 continue; 3742 3743 if (!fmt->elide) 3744 return; 3745 } 3746 3747 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3748 if (!perf_hpp__is_sort_entry(fmt)) 3749 continue; 3750 3751 fmt->elide = false; 3752 } 3753 } 3754 3755 int output_field_add(struct perf_hpp_list *list, const char *tok) 3756 { 3757 unsigned int i; 3758 3759 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3760 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3761 3762 if (strncasecmp(tok, hd->name, strlen(tok))) 3763 continue; 3764 3765 if (!strcasecmp(tok, "weight")) 3766 ui__warning("--fields weight shows the average value unlike in the --sort key.\n"); 3767 3768 return __hpp_dimension__add_output(list, hd); 3769 } 3770 3771 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3772 struct sort_dimension *sd = &common_sort_dimensions[i]; 3773 3774 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3775 continue; 3776 3777 return __sort_dimension__add_output(list, sd); 3778 } 3779 3780 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3781 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3782 3783 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3784 continue; 3785 3786 if (sort__mode != SORT_MODE__BRANCH) 3787 return -EINVAL; 3788 3789 return __sort_dimension__add_output(list, sd); 3790 } 3791 3792 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3793 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3794 3795 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3796 continue; 3797 3798 if (sort__mode != SORT_MODE__MEMORY) 3799 return -EINVAL; 3800 3801 return __sort_dimension__add_output(list, sd); 3802 } 3803 3804 return -ESRCH; 3805 } 3806 3807 static int setup_output_list(struct perf_hpp_list *list, char *str) 3808 { 3809 char *tmp, *tok; 3810 int ret = 0; 3811 3812 for (tok = strtok_r(str, ", ", &tmp); 3813 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3814 ret = output_field_add(list, tok); 3815 if (ret == -EINVAL) { 3816 ui__error("Invalid --fields key: `%s'", tok); 3817 break; 3818 } else if (ret == -ESRCH) { 3819 ui__error("Unknown --fields key: `%s'", tok); 3820 break; 3821 } 3822 } 3823 3824 return ret; 3825 } 3826 3827 void reset_dimensions(void) 3828 { 3829 unsigned int i; 3830 3831 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3832 common_sort_dimensions[i].taken = 0; 3833 3834 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3835 hpp_sort_dimensions[i].taken = 0; 3836 3837 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3838 bstack_sort_dimensions[i].taken = 0; 3839 3840 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3841 memory_sort_dimensions[i].taken = 0; 3842 } 3843 3844 bool is_strict_order(const char *order) 3845 { 3846 return order && (*order != '+'); 3847 } 3848 3849 static int __setup_output_field(void) 3850 { 3851 char *str, *strp; 3852 int ret = -EINVAL; 3853 3854 if (field_order == NULL) 3855 return 0; 3856 3857 strp = str = strdup(field_order); 3858 if (str == NULL) { 3859 pr_err("Not enough memory to setup output fields"); 3860 return -ENOMEM; 3861 } 3862 3863 if (!is_strict_order(field_order)) 3864 strp++; 3865 3866 if (!strlen(strp)) { 3867 ui__error("Invalid --fields key: `+'"); 3868 goto out; 3869 } 3870 3871 ret = setup_output_list(&perf_hpp_list, strp); 3872 3873 out: 3874 free(str); 3875 return ret; 3876 } 3877 3878 int setup_sorting(struct evlist *evlist) 3879 { 3880 int 
err; 3881 3882 err = __setup_sorting(evlist); 3883 if (err < 0) 3884 return err; 3885 3886 if (parent_pattern != default_parent_pattern) { 3887 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3888 if (err < 0) 3889 return err; 3890 } 3891 3892 reset_dimensions(); 3893 3894 /* 3895 * perf diff doesn't use default hpp output fields. 3896 */ 3897 if (sort__mode != SORT_MODE__DIFF) 3898 perf_hpp__init(); 3899 3900 err = __setup_output_field(); 3901 if (err < 0) 3902 return err; 3903 3904 /* copy sort keys to output fields */ 3905 perf_hpp__setup_output_field(&perf_hpp_list); 3906 /* and then copy output fields to sort keys */ 3907 perf_hpp__append_sort_keys(&perf_hpp_list); 3908 3909 /* setup hists-specific output fields */ 3910 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3911 return -1; 3912 3913 return 0; 3914 } 3915 3916 void reset_output_field(void) 3917 { 3918 perf_hpp_list.need_collapse = 0; 3919 perf_hpp_list.parent = 0; 3920 perf_hpp_list.sym = 0; 3921 perf_hpp_list.dso = 0; 3922 3923 field_order = NULL; 3924 sort_order = NULL; 3925 3926 reset_dimensions(); 3927 perf_hpp__reset_output_field(&perf_hpp_list); 3928 } 3929 3930 #define INDENT (3*8 + 1) 3931 3932 static void add_key(struct strbuf *sb, const char *str, int *llen) 3933 { 3934 if (!str) 3935 return; 3936 3937 if (*llen >= 75) { 3938 strbuf_addstr(sb, "\n\t\t\t "); 3939 *llen = INDENT; 3940 } 3941 strbuf_addf(sb, " %s", str); 3942 *llen += strlen(str) + 1; 3943 } 3944 3945 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3946 int *llen) 3947 { 3948 int i; 3949 3950 for (i = 0; i < n; i++) 3951 add_key(sb, s[i].name, llen); 3952 } 3953 3954 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 3955 int *llen) 3956 { 3957 int i; 3958 3959 for (i = 0; i < n; i++) 3960 add_key(sb, s[i].name, llen); 3961 } 3962 3963 char *sort_help(const char *prefix) 3964 { 3965 struct strbuf sb; 3966 char *s; 3967 int len = strlen(prefix) + INDENT; 3968 3969 strbuf_init(&sb, 300); 3970 strbuf_addstr(&sb, prefix); 3971 add_hpp_sort_string(&sb, hpp_sort_dimensions, 3972 ARRAY_SIZE(hpp_sort_dimensions), &len); 3973 add_sort_string(&sb, common_sort_dimensions, 3974 ARRAY_SIZE(common_sort_dimensions), &len); 3975 add_sort_string(&sb, bstack_sort_dimensions, 3976 ARRAY_SIZE(bstack_sort_dimensions), &len); 3977 add_sort_string(&sb, memory_sort_dimensions, 3978 ARRAY_SIZE(memory_sort_dimensions), &len); 3979 s = strbuf_detach(&sb, NULL); 3980 strbuf_release(&sb); 3981 return s; 3982 } 3983
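/*
 * Note on sort_help(): the returned string is built from the dimension
 * tables above, e.g. a prefix such as "sort by key(s):" followed by
 * "overhead overhead_sys ... pid comm dso symbol ...", wrapped at
 * roughly 75 columns.  The buffer comes from strbuf_detach(), so the
 * caller owns it and should eventually free it.  The exact key list
 * depends on the build (e.g. HAVE_LIBTRACEEVENT adds "trace").
 */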