// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "annotate.h"
#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have Adjacent Cacheline Prefetch feature, which
 * behaves like the cacheline size is doubled. Enable this flag to
 * check things in double cacheline granularity.
 */
bool chk_double_cl;

/*
 * Replaces all occurrences of a char used with the:
 *
 * -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names
 * (and other output) with a '.' character, so that it becomes the only
 * invalid separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return thread__tid(right->thread) - thread__tid(left->thread);
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && !RC_CHK_EQUAL(he->thread, th);
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort simd */

static int64_t
sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (left->simd_flags.arch != right->simd_flags.arch)
		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;

	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
}

static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
{
	u64 arch = simd_flags->arch;

	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
		return "SVE";
	else
		return "n/a";
}

static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
{
	const char *name;

	if (!he->simd_flags.arch)
		return repsep_snprintf(bf, size, "");

	name = hist_entry__get_simd_name(&he->simd_flags);

	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
		return repsep_snprintf(bf, size, "[e] %s", name);
	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
		return repsep_snprintf(bf, size, "[p] %s", name);

	return repsep_snprintf(bf, size, "[.] %s", name);
}

struct sort_entry sort_simd = {
	.se_header = "Simd ",
	.se_cmp = sort__simd_cmp,
	.se_snprintf = hist_entry__simd_snprintf,
	.se_width_idx = HISTC_SIMD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	const char *dso_name = "[unknown]";

	if (dso)
		dso_name = verbose > 0 ? dso->long_name : dso->short_name;

	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		struct dso *dso = map ? map__dso(map) : NULL;
		char o = dso ? dso__symtab_origin(dso) : '!';
		u64 rip = ip;

		if (dso && dso->kernel && dso->adjust_symbols)
			rip = map__unmap_ip(map, ip);

		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, rip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort symoff */

static int64_t
sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_cmp(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int64_t
sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_sort(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int
hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;

	if (sym == NULL)
		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);

	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
}

struct sort_entry sort_sym_offset = {
	.se_header = "Symbol Offset",
	.se_cmp = sort__symoff_cmp,
	.se_sort = sort__symoff_sort,
	.se_snprintf = hist_entry__symoff_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL_OFFSET,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__addr_cmp(left->ip, right->ip);
	if (ret)
		return ret;

	return sort__dso_cmp(left, right);
}

static int64_t
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int64_t
sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_collapse(left, right);
}

static void
sort__srcline_init(struct hist_entry *he)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_collapse = sort__srcline_collapse,
	.se_sort = sort__srcline_sort,
	.se_init = sort__srcline_init,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->from.addr - right->branch_info->from.addr;
}

static int64_t
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int64_t
sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_from_collapse(left, right);
}

static void sort__srcline_from_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_from)
		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_collapse = sort__srcline_from_collapse,
	.se_sort = sort__srcline_from_sort,
	.se_init = sort__srcline_from_init,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->to.addr - right->branch_info->to.addr;
}

static int64_t
sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int64_t
sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_to_collapse(left, right);
}

static void sort__srcline_to_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_to)
		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_collapse = sort__srcline_to_collapse,
	.se_sort = sort__srcline_to_sort,
	.se_init = sort__srcline_to_init,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{

	struct symbol *sym = he->ms.sym;
	struct annotated_branch *branch;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	branch = symbol__annotation(sym)->branch;

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (sf == SRCLINE_UNKNOWN)
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_cmp(left, right);
}

static int64_t
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int64_t
sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcfile_collapse(left, right);
}

static void sort__srcfile_init(struct hist_entry *he)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_collapse = sort__srcfile_collapse,
	.se_sort = sort__srcfile_sort,
	.se_init = sort__srcfile_init,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort cgroup */

static int64_t
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cgroup - left->cgroup;
}

static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
{
	const char *cgrp_name = "N/A";

	if (he->cgroup) {
		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
						   he->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;
		else
			cgrp_name = "unknown";
	}

	return repsep_snprintf(bf, size, "%s", cgrp_name);
}

struct sort_entry sort_cgroup = {
	.se_header = "Cgroup",
	.se_cmp = sort__cgroup_cmp,
	.se_snprintf = hist_entry__cgroup_snprintf,
	.se_width_idx = HISTC_CGROUP,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

#ifdef HAVE_LIBTRACEEVENT
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};
#endif /* HAVE_LIBTRACEEVENT */

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       map__dso(he->branch_info->from.ms.map) != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       map__dso(he->branch_info->to.ms.map) != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
						 from->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
						 to->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};
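
/*
 * Like _hist_entry__sym_snprintf(), but tailored to the addr_from/addr_to
 * sort keys below: it never prints the verbose raw-address column and it
 * appends a "+0x..." offset to any resolved symbol, not only STT_OBJECT
 * ones.
 */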
static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
				      unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(to_l->addr, to_r->addr);
}

struct sort_entry sort_addr_from = {
	.se_header = "Source Address",
	.se_cmp = sort__addr_from_cmp,
	.se_snprintf = hist_entry__addr_from_snprintf,
	.se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx = HISTC_ADDR_FROM,
};

struct sort_entry sort_addr_to = {
	.se_header = "Target Address",
	.se_cmp = sort__addr_to_cmp,
	.se_snprintf = hist_entry__addr_to_snprintf,
	.se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx = HISTC_ADDR_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		ms = &he->mem_info->daddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		ms = &he->mem_info->iaddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.ms.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	struct dso *l_dso, *r_dso;
	int rc;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.ms.map;
	r_map = right->mem_info->daddr.ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	l_dso = map__dso(l_map);
	r_dso = map__dso(r_map);
	rc = dso__cmp_id(l_dso, r_dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
	    !l_dso->id.ino && !l_dso->id.ino_generation) {
		/* userspace anonymous */

		if (thread__pid(left->thread) > thread__pid(right->thread))
			return -1;
		if (thread__pid(left->thread) < thread__pid(right->thread))
			return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
	r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
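
/*
 * Print the sampled data cacheline.  The level character is switched to
 * 's' for shared, file-backed data mappings and to 'X' when no map could
 * be resolved; otherwise the entry's own level is kept.
 */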
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = he->mem_info->daddr.ms.map;
		struct dso *dso = map ? map__dso(map) : NULL;

		addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
		ms = &he->mem_info->daddr.ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map__prot(map) & PROT_EXEC) &&
		    (map__flags(map) & MAP_SHARED) &&
		    (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight - right->weight;
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width,
			       he->weight * he->stat.nr_events);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->ins_lat - right->ins_lat;
}

static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}

struct sort_entry sort_local_ins_lat = {
	.se_header = "Local INSTR Latency",
	.se_cmp = sort__ins_lat_cmp,
	.se_snprintf = hist_entry__local_ins_lat_snprintf,
	.se_width_idx = HISTC_LOCAL_INS_LAT,
};

static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->ins_lat * he->stat.nr_events);
}

struct sort_entry sort_global_ins_lat = {
	.se_header = "INSTR Latency",
	.se_cmp = sort__ins_lat_cmp,
	.se_snprintf = hist_entry__global_ins_lat_snprintf,
	.se_width_idx = HISTC_GLOBAL_INS_LAT,
};

static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->p_stage_cyc - right->p_stage_cyc;
}

static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
						   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->p_stage_cyc * he->stat.nr_events);
}


static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
}
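
/*
 * As with weight and instruction latency above, the "local" key prints
 * the entry's own p_stage_cyc value while the global variant scales it
 * by the number of aggregated events.
 */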
= "Local Pipeline Stage Cycle", 1732 .se_cmp = sort__p_stage_cyc_cmp, 1733 .se_snprintf = hist_entry__p_stage_cyc_snprintf, 1734 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC, 1735 }; 1736 1737 struct sort_entry sort_global_p_stage_cyc = { 1738 .se_header = "Pipeline Stage Cycle", 1739 .se_cmp = sort__p_stage_cyc_cmp, 1740 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf, 1741 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC, 1742 }; 1743 1744 struct sort_entry sort_mem_daddr_sym = { 1745 .se_header = "Data Symbol", 1746 .se_cmp = sort__daddr_cmp, 1747 .se_snprintf = hist_entry__daddr_snprintf, 1748 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1749 }; 1750 1751 struct sort_entry sort_mem_iaddr_sym = { 1752 .se_header = "Code Symbol", 1753 .se_cmp = sort__iaddr_cmp, 1754 .se_snprintf = hist_entry__iaddr_snprintf, 1755 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1756 }; 1757 1758 struct sort_entry sort_mem_daddr_dso = { 1759 .se_header = "Data Object", 1760 .se_cmp = sort__dso_daddr_cmp, 1761 .se_snprintf = hist_entry__dso_daddr_snprintf, 1762 .se_width_idx = HISTC_MEM_DADDR_DSO, 1763 }; 1764 1765 struct sort_entry sort_mem_locked = { 1766 .se_header = "Locked", 1767 .se_cmp = sort__locked_cmp, 1768 .se_snprintf = hist_entry__locked_snprintf, 1769 .se_width_idx = HISTC_MEM_LOCKED, 1770 }; 1771 1772 struct sort_entry sort_mem_tlb = { 1773 .se_header = "TLB access", 1774 .se_cmp = sort__tlb_cmp, 1775 .se_snprintf = hist_entry__tlb_snprintf, 1776 .se_width_idx = HISTC_MEM_TLB, 1777 }; 1778 1779 struct sort_entry sort_mem_lvl = { 1780 .se_header = "Memory access", 1781 .se_cmp = sort__lvl_cmp, 1782 .se_snprintf = hist_entry__lvl_snprintf, 1783 .se_width_idx = HISTC_MEM_LVL, 1784 }; 1785 1786 struct sort_entry sort_mem_snoop = { 1787 .se_header = "Snoop", 1788 .se_cmp = sort__snoop_cmp, 1789 .se_snprintf = hist_entry__snoop_snprintf, 1790 .se_width_idx = HISTC_MEM_SNOOP, 1791 }; 1792 1793 struct sort_entry sort_mem_dcacheline = { 1794 .se_header = "Data Cacheline", 1795 .se_cmp = sort__dcacheline_cmp, 1796 .se_snprintf = hist_entry__dcacheline_snprintf, 1797 .se_width_idx = HISTC_MEM_DCACHELINE, 1798 }; 1799 1800 static int64_t 1801 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) 1802 { 1803 union perf_mem_data_src data_src_l; 1804 union perf_mem_data_src data_src_r; 1805 1806 if (left->mem_info) 1807 data_src_l = left->mem_info->data_src; 1808 else 1809 data_src_l.mem_blk = PERF_MEM_BLK_NA; 1810 1811 if (right->mem_info) 1812 data_src_r = right->mem_info->data_src; 1813 else 1814 data_src_r.mem_blk = PERF_MEM_BLK_NA; 1815 1816 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk); 1817 } 1818 1819 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf, 1820 size_t size, unsigned int width) 1821 { 1822 char out[16]; 1823 1824 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info); 1825 return repsep_snprintf(bf, size, "%.*s", width, out); 1826 } 1827 1828 struct sort_entry sort_mem_blocked = { 1829 .se_header = "Blocked", 1830 .se_cmp = sort__blocked_cmp, 1831 .se_snprintf = hist_entry__blocked_snprintf, 1832 .se_width_idx = HISTC_MEM_BLOCKED, 1833 }; 1834 1835 static int64_t 1836 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1837 { 1838 uint64_t l = 0, r = 0; 1839 1840 if (left->mem_info) 1841 l = left->mem_info->daddr.phys_addr; 1842 if (right->mem_info) 1843 r = right->mem_info->daddr.phys_addr; 1844 1845 return (int64_t)(r - l); 1846 } 1847 1848 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1849 size_t size, 
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.data_page_size;
	if (right->mem_info)
		r = right->mem_info->daddr.data_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
}

struct sort_entry sort_mem_data_page_size = {
	.se_header = "Data Page Size",
	.se_cmp = sort__data_page_size_cmp,
	.se_snprintf = hist_entry__data_page_size_snprintf,
	.se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
};

static int64_t
sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = left->code_page_size;
	uint64_t r = right->code_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(he->code_page_size, str));
}

struct sort_entry sort_code_page_size = {
	.se_header = "Code Page Size",
	.se_cmp = sort__code_page_size_cmp,
	.se_snprintf = hist_entry__code_page_size_snprintf,
	.se_width_idx = HISTC_CODE_PAGE_SIZE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}
int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1973 size_t size, unsigned int width) 1974 { 1975 static const char *out = "N/A"; 1976 1977 if (he->branch_info) { 1978 if (he->branch_info->flags.in_tx) 1979 out = "T"; 1980 else 1981 out = "."; 1982 } 1983 1984 return repsep_snprintf(bf, size, "%-*s", width, out); 1985 } 1986 1987 struct sort_entry sort_in_tx = { 1988 .se_header = "Branch in transaction", 1989 .se_cmp = sort__in_tx_cmp, 1990 .se_snprintf = hist_entry__in_tx_snprintf, 1991 .se_width_idx = HISTC_IN_TX, 1992 }; 1993 1994 static int64_t 1995 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1996 { 1997 return left->transaction - right->transaction; 1998 } 1999 2000 static inline char *add_str(char *p, const char *str) 2001 { 2002 strcpy(p, str); 2003 return p + strlen(str); 2004 } 2005 2006 static struct txbit { 2007 unsigned flag; 2008 const char *name; 2009 int skip_for_len; 2010 } txbits[] = { 2011 { PERF_TXN_ELISION, "EL ", 0 }, 2012 { PERF_TXN_TRANSACTION, "TX ", 1 }, 2013 { PERF_TXN_SYNC, "SYNC ", 1 }, 2014 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 2015 { PERF_TXN_RETRY, "RETRY ", 0 }, 2016 { PERF_TXN_CONFLICT, "CON ", 0 }, 2017 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 2018 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 2019 { 0, NULL, 0 } 2020 }; 2021 2022 int hist_entry__transaction_len(void) 2023 { 2024 int i; 2025 int len = 0; 2026 2027 for (i = 0; txbits[i].name; i++) { 2028 if (!txbits[i].skip_for_len) 2029 len += strlen(txbits[i].name); 2030 } 2031 len += 4; /* :XX<space> */ 2032 return len; 2033 } 2034 2035 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 2036 size_t size, unsigned int width) 2037 { 2038 u64 t = he->transaction; 2039 char buf[128]; 2040 char *p = buf; 2041 int i; 2042 2043 buf[0] = 0; 2044 for (i = 0; txbits[i].name; i++) 2045 if (txbits[i].flag & t) 2046 p = add_str(p, txbits[i].name); 2047 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 2048 p = add_str(p, "NEITHER "); 2049 if (t & PERF_TXN_ABORT_MASK) { 2050 sprintf(p, ":%" PRIx64, 2051 (t & PERF_TXN_ABORT_MASK) >> 2052 PERF_TXN_ABORT_SHIFT); 2053 p += strlen(p); 2054 } 2055 2056 return repsep_snprintf(bf, size, "%-*s", width, buf); 2057 } 2058 2059 struct sort_entry sort_transaction = { 2060 .se_header = "Transaction ", 2061 .se_cmp = sort__transaction_cmp, 2062 .se_snprintf = hist_entry__transaction_snprintf, 2063 .se_width_idx = HISTC_TRANSACTION, 2064 }; 2065 2066 /* --sort symbol_size */ 2067 2068 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 2069 { 2070 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 2071 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 2072 2073 return size_l < size_r ? -1 : 2074 size_l == size_r ? 
0 : 1; 2075 } 2076 2077 static int64_t 2078 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 2079 { 2080 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 2081 } 2082 2083 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 2084 size_t bf_size, unsigned int width) 2085 { 2086 if (sym) 2087 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 2088 2089 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2090 } 2091 2092 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 2093 size_t size, unsigned int width) 2094 { 2095 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 2096 } 2097 2098 struct sort_entry sort_sym_size = { 2099 .se_header = "Symbol size", 2100 .se_cmp = sort__sym_size_cmp, 2101 .se_snprintf = hist_entry__sym_size_snprintf, 2102 .se_width_idx = HISTC_SYM_SIZE, 2103 }; 2104 2105 /* --sort dso_size */ 2106 2107 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 2108 { 2109 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 2110 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 2111 2112 return size_l < size_r ? -1 : 2113 size_l == size_r ? 0 : 1; 2114 } 2115 2116 static int64_t 2117 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 2118 { 2119 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 2120 } 2121 2122 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 2123 size_t bf_size, unsigned int width) 2124 { 2125 if (map && map__dso(map)) 2126 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map)); 2127 2128 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2129 } 2130 2131 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 2132 size_t size, unsigned int width) 2133 { 2134 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 2135 } 2136 2137 struct sort_entry sort_dso_size = { 2138 .se_header = "DSO size", 2139 .se_cmp = sort__dso_size_cmp, 2140 .se_snprintf = hist_entry__dso_size_snprintf, 2141 .se_width_idx = HISTC_DSO_SIZE, 2142 }; 2143 2144 /* --sort addr */ 2145 2146 static int64_t 2147 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right) 2148 { 2149 u64 left_ip = left->ip; 2150 u64 right_ip = right->ip; 2151 struct map *left_map = left->ms.map; 2152 struct map *right_map = right->ms.map; 2153 2154 if (left_map) 2155 left_ip = map__unmap_ip(left_map, left_ip); 2156 if (right_map) 2157 right_ip = map__unmap_ip(right_map, right_ip); 2158 2159 return _sort__addr_cmp(left_ip, right_ip); 2160 } 2161 2162 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf, 2163 size_t size, unsigned int width) 2164 { 2165 u64 ip = he->ip; 2166 struct map *map = he->ms.map; 2167 2168 if (map) 2169 ip = map__unmap_ip(map, ip); 2170 2171 return repsep_snprintf(bf, size, "%-#*llx", width, ip); 2172 } 2173 2174 struct sort_entry sort_addr = { 2175 .se_header = "Address", 2176 .se_cmp = sort__addr_cmp, 2177 .se_snprintf = hist_entry__addr_snprintf, 2178 .se_width_idx = HISTC_ADDR, 2179 }; 2180 2181 /* --sort type */ 2182 2183 struct annotated_data_type unknown_type = { 2184 .self = { 2185 .type_name = (char *)"(unknown)", 2186 .children = LIST_HEAD_INIT(unknown_type.self.children), 2187 }, 2188 }; 2189 2190 static int64_t 2191 sort__type_cmp(struct hist_entry *left, struct hist_entry *right) 2192 { 2193 return sort__addr_cmp(left, right); 2194 } 2195 2196 static void sort__type_init(struct hist_entry *he) 2197 { 2198 if 
(he->mem_type) 2199 return; 2200 2201 he->mem_type = hist_entry__get_data_type(he); 2202 if (he->mem_type == NULL) { 2203 he->mem_type = &unknown_type; 2204 he->mem_type_off = 0; 2205 } 2206 } 2207 2208 static int64_t 2209 sort__type_collapse(struct hist_entry *left, struct hist_entry *right) 2210 { 2211 struct annotated_data_type *left_type = left->mem_type; 2212 struct annotated_data_type *right_type = right->mem_type; 2213 2214 if (!left_type) { 2215 sort__type_init(left); 2216 left_type = left->mem_type; 2217 } 2218 2219 if (!right_type) { 2220 sort__type_init(right); 2221 right_type = right->mem_type; 2222 } 2223 2224 return strcmp(left_type->self.type_name, right_type->self.type_name); 2225 } 2226 2227 static int64_t 2228 sort__type_sort(struct hist_entry *left, struct hist_entry *right) 2229 { 2230 return sort__type_collapse(left, right); 2231 } 2232 2233 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf, 2234 size_t size, unsigned int width) 2235 { 2236 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name); 2237 } 2238 2239 struct sort_entry sort_type = { 2240 .se_header = "Data Type", 2241 .se_cmp = sort__type_cmp, 2242 .se_collapse = sort__type_collapse, 2243 .se_sort = sort__type_sort, 2244 .se_init = sort__type_init, 2245 .se_snprintf = hist_entry__type_snprintf, 2246 .se_width_idx = HISTC_TYPE, 2247 }; 2248 2249 /* --sort typeoff */ 2250 2251 static int64_t 2252 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right) 2253 { 2254 struct annotated_data_type *left_type = left->mem_type; 2255 struct annotated_data_type *right_type = right->mem_type; 2256 int64_t ret; 2257 2258 if (!left_type) { 2259 sort__type_init(left); 2260 left_type = left->mem_type; 2261 } 2262 2263 if (!right_type) { 2264 sort__type_init(right); 2265 right_type = right->mem_type; 2266 } 2267 2268 ret = strcmp(left_type->self.type_name, right_type->self.type_name); 2269 if (ret) 2270 return ret; 2271 return left->mem_type_off - right->mem_type_off; 2272 } 2273 2274 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m, 2275 int offset, bool first) 2276 { 2277 struct annotated_member *child; 2278 2279 if (list_empty(&m->children)) 2280 return; 2281 2282 list_for_each_entry(child, &m->children, node) { 2283 if (child->offset <= offset && offset < child->offset + child->size) { 2284 int len = 0; 2285 2286 /* It can have anonymous struct/union members */ 2287 if (child->var_name) { 2288 len = scnprintf(buf, sz, "%s%s", 2289 first ? 
"" : ".", child->var_name); 2290 first = false; 2291 } 2292 2293 fill_member_name(buf + len, sz - len, child, offset, first); 2294 return; 2295 } 2296 } 2297 } 2298 2299 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf, 2300 size_t size, unsigned int width __maybe_unused) 2301 { 2302 struct annotated_data_type *he_type = he->mem_type; 2303 char buf[4096]; 2304 2305 buf[0] = '\0'; 2306 if (list_empty(&he_type->self.children)) 2307 snprintf(buf, sizeof(buf), "no field"); 2308 else 2309 fill_member_name(buf, sizeof(buf), &he_type->self, 2310 he->mem_type_off, true); 2311 buf[4095] = '\0'; 2312 2313 return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name, 2314 he->mem_type_off, buf); 2315 } 2316 2317 struct sort_entry sort_type_offset = { 2318 .se_header = "Data Type Offset", 2319 .se_cmp = sort__type_cmp, 2320 .se_collapse = sort__typeoff_sort, 2321 .se_sort = sort__typeoff_sort, 2322 .se_init = sort__type_init, 2323 .se_snprintf = hist_entry__typeoff_snprintf, 2324 .se_width_idx = HISTC_TYPE_OFFSET, 2325 }; 2326 2327 2328 struct sort_dimension { 2329 const char *name; 2330 struct sort_entry *entry; 2331 int taken; 2332 }; 2333 2334 int __weak arch_support_sort_key(const char *sort_key __maybe_unused) 2335 { 2336 return 0; 2337 } 2338 2339 const char * __weak arch_perf_header_entry(const char *se_header) 2340 { 2341 return se_header; 2342 } 2343 2344 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd) 2345 { 2346 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header); 2347 } 2348 2349 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 2350 2351 static struct sort_dimension common_sort_dimensions[] = { 2352 DIM(SORT_PID, "pid", sort_thread), 2353 DIM(SORT_COMM, "comm", sort_comm), 2354 DIM(SORT_DSO, "dso", sort_dso), 2355 DIM(SORT_SYM, "symbol", sort_sym), 2356 DIM(SORT_PARENT, "parent", sort_parent), 2357 DIM(SORT_CPU, "cpu", sort_cpu), 2358 DIM(SORT_SOCKET, "socket", sort_socket), 2359 DIM(SORT_SRCLINE, "srcline", sort_srcline), 2360 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 2361 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 2362 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 2363 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 2364 #ifdef HAVE_LIBTRACEEVENT 2365 DIM(SORT_TRACE, "trace", sort_trace), 2366 #endif 2367 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 2368 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 2369 DIM(SORT_CGROUP, "cgroup", sort_cgroup), 2370 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 2371 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 2372 DIM(SORT_TIME, "time", sort_time), 2373 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size), 2374 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat), 2375 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat), 2376 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc), 2377 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc), 2378 DIM(SORT_ADDR, "addr", sort_addr), 2379 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc), 2380 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc), 2381 DIM(SORT_SIMD, "simd", sort_simd), 2382 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type), 2383 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset), 2384 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset), 2385 }; 2386 2387 #undef DIM 2388 2389 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, 
.entry = &(func) } 2390 2391 static struct sort_dimension bstack_sort_dimensions[] = { 2392 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 2393 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 2394 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 2395 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 2396 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 2397 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 2398 DIM(SORT_ABORT, "abort", sort_abort), 2399 DIM(SORT_CYCLES, "cycles", sort_cycles), 2400 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 2401 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 2402 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 2403 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from), 2404 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to), 2405 }; 2406 2407 #undef DIM 2408 2409 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 2410 2411 static struct sort_dimension memory_sort_dimensions[] = { 2412 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 2413 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 2414 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 2415 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 2416 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 2417 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 2418 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 2419 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 2420 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 2421 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size), 2422 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked), 2423 }; 2424 2425 #undef DIM 2426 2427 struct hpp_dimension { 2428 const char *name; 2429 struct perf_hpp_fmt *fmt; 2430 int taken; 2431 }; 2432 2433 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 2434 2435 static struct hpp_dimension hpp_sort_dimensions[] = { 2436 DIM(PERF_HPP__OVERHEAD, "overhead"), 2437 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 2438 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 2439 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 2440 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 2441 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 2442 DIM(PERF_HPP__SAMPLES, "sample"), 2443 DIM(PERF_HPP__PERIOD, "period"), 2444 }; 2445 2446 #undef DIM 2447 2448 struct hpp_sort_entry { 2449 struct perf_hpp_fmt hpp; 2450 struct sort_entry *se; 2451 }; 2452 2453 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 2454 { 2455 struct hpp_sort_entry *hse; 2456 2457 if (!perf_hpp__is_sort_entry(fmt)) 2458 return; 2459 2460 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2461 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 2462 } 2463 2464 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2465 struct hists *hists, int line __maybe_unused, 2466 int *span __maybe_unused) 2467 { 2468 struct hpp_sort_entry *hse; 2469 size_t len = fmt->user_len; 2470 2471 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2472 2473 if (!len) 2474 len = hists__col_len(hists, hse->se->se_width_idx); 2475 2476 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 2477 } 2478 2479 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 2480 struct perf_hpp *hpp __maybe_unused, 2481 struct hists *hists) 2482 { 2483 struct hpp_sort_entry *hse; 2484 size_t len = fmt->user_len; 2485 2486 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2487 2488 if (!len) 2489 len = hists__col_len(hists, hse->se->se_width_idx); 
2490 2491 return len; 2492 } 2493 2494 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2495 struct hist_entry *he) 2496 { 2497 struct hpp_sort_entry *hse; 2498 size_t len = fmt->user_len; 2499 2500 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2501 2502 if (!len) 2503 len = hists__col_len(he->hists, hse->se->se_width_idx); 2504 2505 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 2506 } 2507 2508 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 2509 struct hist_entry *a, struct hist_entry *b) 2510 { 2511 struct hpp_sort_entry *hse; 2512 2513 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2514 return hse->se->se_cmp(a, b); 2515 } 2516 2517 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 2518 struct hist_entry *a, struct hist_entry *b) 2519 { 2520 struct hpp_sort_entry *hse; 2521 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 2522 2523 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2524 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 2525 return collapse_fn(a, b); 2526 } 2527 2528 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 2529 struct hist_entry *a, struct hist_entry *b) 2530 { 2531 struct hpp_sort_entry *hse; 2532 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 2533 2534 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2535 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 2536 return sort_fn(a, b); 2537 } 2538 2539 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 2540 { 2541 return format->header == __sort__hpp_header; 2542 } 2543 2544 #define MK_SORT_ENTRY_CHK(key) \ 2545 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 2546 { \ 2547 struct hpp_sort_entry *hse; \ 2548 \ 2549 if (!perf_hpp__is_sort_entry(fmt)) \ 2550 return false; \ 2551 \ 2552 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 2553 return hse->se == &sort_ ## key ; \ 2554 } 2555 2556 #ifdef HAVE_LIBTRACEEVENT 2557 MK_SORT_ENTRY_CHK(trace) 2558 #else 2559 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2560 { 2561 return false; 2562 } 2563 #endif 2564 MK_SORT_ENTRY_CHK(srcline) 2565 MK_SORT_ENTRY_CHK(srcfile) 2566 MK_SORT_ENTRY_CHK(thread) 2567 MK_SORT_ENTRY_CHK(comm) 2568 MK_SORT_ENTRY_CHK(dso) 2569 MK_SORT_ENTRY_CHK(sym) 2570 2571 2572 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2573 { 2574 struct hpp_sort_entry *hse_a; 2575 struct hpp_sort_entry *hse_b; 2576 2577 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 2578 return false; 2579 2580 hse_a = container_of(a, struct hpp_sort_entry, hpp); 2581 hse_b = container_of(b, struct hpp_sort_entry, hpp); 2582 2583 return hse_a->se == hse_b->se; 2584 } 2585 2586 static void hse_free(struct perf_hpp_fmt *fmt) 2587 { 2588 struct hpp_sort_entry *hse; 2589 2590 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2591 free(hse); 2592 } 2593 2594 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2595 { 2596 struct hpp_sort_entry *hse; 2597 2598 if (!perf_hpp__is_sort_entry(fmt)) 2599 return; 2600 2601 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2602 2603 if (hse->se->se_init) 2604 hse->se->se_init(he); 2605 } 2606 2607 static struct hpp_sort_entry * 2608 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 2609 { 2610 struct hpp_sort_entry *hse; 2611 2612 hse = malloc(sizeof(*hse)); 2613 if (hse == NULL) { 2614 pr_err("Memory allocation failed\n"); 2615 return NULL; 2616 } 2617 2618 hse->se = sd->entry; 2619 
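	/*
	 * The assignments that follow point the generic hpp callbacks at the
	 * __sort__hpp_* wrappers, which simply forward to this sort entry's
	 * se_cmp/se_collapse/se_sort/se_snprintf hooks.
	 */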
hse->hpp.name = sd->entry->se_header; 2620 hse->hpp.header = __sort__hpp_header; 2621 hse->hpp.width = __sort__hpp_width; 2622 hse->hpp.entry = __sort__hpp_entry; 2623 hse->hpp.color = NULL; 2624 2625 hse->hpp.cmp = __sort__hpp_cmp; 2626 hse->hpp.collapse = __sort__hpp_collapse; 2627 hse->hpp.sort = __sort__hpp_sort; 2628 hse->hpp.equal = __sort__hpp_equal; 2629 hse->hpp.free = hse_free; 2630 hse->hpp.init = hse_init; 2631 2632 INIT_LIST_HEAD(&hse->hpp.list); 2633 INIT_LIST_HEAD(&hse->hpp.sort_list); 2634 hse->hpp.elide = false; 2635 hse->hpp.len = 0; 2636 hse->hpp.user_len = 0; 2637 hse->hpp.level = level; 2638 2639 return hse; 2640 } 2641 2642 static void hpp_free(struct perf_hpp_fmt *fmt) 2643 { 2644 free(fmt); 2645 } 2646 2647 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 2648 int level) 2649 { 2650 struct perf_hpp_fmt *fmt; 2651 2652 fmt = memdup(hd->fmt, sizeof(*fmt)); 2653 if (fmt) { 2654 INIT_LIST_HEAD(&fmt->list); 2655 INIT_LIST_HEAD(&fmt->sort_list); 2656 fmt->free = hpp_free; 2657 fmt->level = level; 2658 } 2659 2660 return fmt; 2661 } 2662 2663 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 2664 { 2665 struct perf_hpp_fmt *fmt; 2666 struct hpp_sort_entry *hse; 2667 int ret = -1; 2668 int r; 2669 2670 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 2671 if (!perf_hpp__is_sort_entry(fmt)) 2672 continue; 2673 2674 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2675 if (hse->se->se_filter == NULL) 2676 continue; 2677 2678 /* 2679 * hist entry is filtered if any of sort key in the hpp list 2680 * is applied. But it should skip non-matched filter types. 2681 */ 2682 r = hse->se->se_filter(he, type, arg); 2683 if (r >= 0) { 2684 if (ret < 0) 2685 ret = 0; 2686 ret |= r; 2687 } 2688 } 2689 2690 return ret; 2691 } 2692 2693 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 2694 struct perf_hpp_list *list, 2695 int level) 2696 { 2697 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 2698 2699 if (hse == NULL) 2700 return -1; 2701 2702 perf_hpp_list__register_sort_field(list, &hse->hpp); 2703 return 0; 2704 } 2705 2706 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 2707 struct perf_hpp_list *list) 2708 { 2709 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 2710 2711 if (hse == NULL) 2712 return -1; 2713 2714 perf_hpp_list__column_register(list, &hse->hpp); 2715 return 0; 2716 } 2717 2718 #ifndef HAVE_LIBTRACEEVENT 2719 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2720 { 2721 return false; 2722 } 2723 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused, 2724 struct hists *hists __maybe_unused) 2725 { 2726 return false; 2727 } 2728 #else 2729 struct hpp_dynamic_entry { 2730 struct perf_hpp_fmt hpp; 2731 struct evsel *evsel; 2732 struct tep_format_field *field; 2733 unsigned dynamic_len; 2734 bool raw_trace; 2735 }; 2736 2737 static int hde_width(struct hpp_dynamic_entry *hde) 2738 { 2739 if (!hde->hpp.len) { 2740 int len = hde->dynamic_len; 2741 int namelen = strlen(hde->field->name); 2742 int fieldlen = hde->field->size; 2743 2744 if (namelen > len) 2745 len = namelen; 2746 2747 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2748 /* length for print hex numbers */ 2749 fieldlen = hde->field->size * 2 + 2; 2750 } 2751 if (fieldlen > len) 2752 len = fieldlen; 2753 2754 hde->hpp.len = len; 2755 } 2756 return hde->hpp.len; 2757 } 2758 2759 static void update_dynamic_len(struct 
hpp_dynamic_entry *hde, 2760 struct hist_entry *he) 2761 { 2762 char *str, *pos; 2763 struct tep_format_field *field = hde->field; 2764 size_t namelen; 2765 bool last = false; 2766 2767 if (hde->raw_trace) 2768 return; 2769 2770 /* parse pretty print result and update max length */ 2771 if (!he->trace_output) 2772 he->trace_output = get_trace_output(he); 2773 2774 namelen = strlen(field->name); 2775 str = he->trace_output; 2776 2777 while (str) { 2778 pos = strchr(str, ' '); 2779 if (pos == NULL) { 2780 last = true; 2781 pos = str + strlen(str); 2782 } 2783 2784 if (!strncmp(str, field->name, namelen)) { 2785 size_t len; 2786 2787 str += namelen + 1; 2788 len = pos - str; 2789 2790 if (len > hde->dynamic_len) 2791 hde->dynamic_len = len; 2792 break; 2793 } 2794 2795 if (last) 2796 str = NULL; 2797 else 2798 str = pos + 1; 2799 } 2800 } 2801 2802 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2803 struct hists *hists __maybe_unused, 2804 int line __maybe_unused, 2805 int *span __maybe_unused) 2806 { 2807 struct hpp_dynamic_entry *hde; 2808 size_t len = fmt->user_len; 2809 2810 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2811 2812 if (!len) 2813 len = hde_width(hde); 2814 2815 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2816 } 2817 2818 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2819 struct perf_hpp *hpp __maybe_unused, 2820 struct hists *hists __maybe_unused) 2821 { 2822 struct hpp_dynamic_entry *hde; 2823 size_t len = fmt->user_len; 2824 2825 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2826 2827 if (!len) 2828 len = hde_width(hde); 2829 2830 return len; 2831 } 2832 2833 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2834 { 2835 struct hpp_dynamic_entry *hde; 2836 2837 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2838 2839 return hists_to_evsel(hists) == hde->evsel; 2840 } 2841 2842 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2843 struct hist_entry *he) 2844 { 2845 struct hpp_dynamic_entry *hde; 2846 size_t len = fmt->user_len; 2847 char *str, *pos; 2848 struct tep_format_field *field; 2849 size_t namelen; 2850 bool last = false; 2851 int ret; 2852 2853 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2854 2855 if (!len) 2856 len = hde_width(hde); 2857 2858 if (hde->raw_trace) 2859 goto raw_field; 2860 2861 if (!he->trace_output) 2862 he->trace_output = get_trace_output(he); 2863 2864 field = hde->field; 2865 namelen = strlen(field->name); 2866 str = he->trace_output; 2867 2868 while (str) { 2869 pos = strchr(str, ' '); 2870 if (pos == NULL) { 2871 last = true; 2872 pos = str + strlen(str); 2873 } 2874 2875 if (!strncmp(str, field->name, namelen)) { 2876 str += namelen + 1; 2877 str = strndup(str, pos - str); 2878 2879 if (str == NULL) 2880 return scnprintf(hpp->buf, hpp->size, 2881 "%*.*s", len, len, "ERROR"); 2882 break; 2883 } 2884 2885 if (last) 2886 str = NULL; 2887 else 2888 str = pos + 1; 2889 } 2890 2891 if (str == NULL) { 2892 struct trace_seq seq; 2893 raw_field: 2894 trace_seq_init(&seq); 2895 tep_print_field(&seq, he->raw_data, hde->field); 2896 str = seq.buffer; 2897 } 2898 2899 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2900 free(str); 2901 return ret; 2902 } 2903 2904 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2905 struct hist_entry *a, struct hist_entry *b) 2906 { 2907 struct hpp_dynamic_entry *hde; 2908 struct tep_format_field *field; 2909 unsigned offset, 
size; 2910 2911 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2912 2913 field = hde->field; 2914 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2915 unsigned long long dyn; 2916 2917 tep_read_number_field(field, a->raw_data, &dyn); 2918 offset = dyn & 0xffff; 2919 size = (dyn >> 16) & 0xffff; 2920 if (tep_field_is_relative(field->flags)) 2921 offset += field->offset + field->size; 2922 /* record max width for output */ 2923 if (size > hde->dynamic_len) 2924 hde->dynamic_len = size; 2925 } else { 2926 offset = field->offset; 2927 size = field->size; 2928 } 2929 2930 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2931 } 2932 2933 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2934 { 2935 return fmt->cmp == __sort__hde_cmp; 2936 } 2937 2938 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2939 { 2940 struct hpp_dynamic_entry *hde_a; 2941 struct hpp_dynamic_entry *hde_b; 2942 2943 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2944 return false; 2945 2946 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2947 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2948 2949 return hde_a->field == hde_b->field; 2950 } 2951 2952 static void hde_free(struct perf_hpp_fmt *fmt) 2953 { 2954 struct hpp_dynamic_entry *hde; 2955 2956 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2957 free(hde); 2958 } 2959 2960 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2961 { 2962 struct hpp_dynamic_entry *hde; 2963 2964 if (!perf_hpp__is_dynamic_entry(fmt)) 2965 return; 2966 2967 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2968 update_dynamic_len(hde, he); 2969 } 2970 2971 static struct hpp_dynamic_entry * 2972 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2973 int level) 2974 { 2975 struct hpp_dynamic_entry *hde; 2976 2977 hde = malloc(sizeof(*hde)); 2978 if (hde == NULL) { 2979 pr_debug("Memory allocation failed\n"); 2980 return NULL; 2981 } 2982 2983 hde->evsel = evsel; 2984 hde->field = field; 2985 hde->dynamic_len = 0; 2986 2987 hde->hpp.name = field->name; 2988 hde->hpp.header = __sort__hde_header; 2989 hde->hpp.width = __sort__hde_width; 2990 hde->hpp.entry = __sort__hde_entry; 2991 hde->hpp.color = NULL; 2992 2993 hde->hpp.init = __sort__hde_init; 2994 hde->hpp.cmp = __sort__hde_cmp; 2995 hde->hpp.collapse = __sort__hde_cmp; 2996 hde->hpp.sort = __sort__hde_cmp; 2997 hde->hpp.equal = __sort__hde_equal; 2998 hde->hpp.free = hde_free; 2999 3000 INIT_LIST_HEAD(&hde->hpp.list); 3001 INIT_LIST_HEAD(&hde->hpp.sort_list); 3002 hde->hpp.elide = false; 3003 hde->hpp.len = 0; 3004 hde->hpp.user_len = 0; 3005 hde->hpp.level = level; 3006 3007 return hde; 3008 } 3009 #endif /* HAVE_LIBTRACEEVENT */ 3010 3011 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 3012 { 3013 struct perf_hpp_fmt *new_fmt = NULL; 3014 3015 if (perf_hpp__is_sort_entry(fmt)) { 3016 struct hpp_sort_entry *hse, *new_hse; 3017 3018 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3019 new_hse = memdup(hse, sizeof(*hse)); 3020 if (new_hse) 3021 new_fmt = &new_hse->hpp; 3022 #ifdef HAVE_LIBTRACEEVENT 3023 } else if (perf_hpp__is_dynamic_entry(fmt)) { 3024 struct hpp_dynamic_entry *hde, *new_hde; 3025 3026 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 3027 new_hde = memdup(hde, sizeof(*hde)); 3028 if (new_hde) 3029 new_fmt = &new_hde->hpp; 3030 #endif 3031 } else { 3032 new_fmt = memdup(fmt, sizeof(*fmt)); 3033 } 3034 3035 
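	/*
	 * Give the duplicate fresh list nodes so it starts out unlinked from
	 * the original format's output and sort lists.
	 */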
INIT_LIST_HEAD(&new_fmt->list); 3036 INIT_LIST_HEAD(&new_fmt->sort_list); 3037 3038 return new_fmt; 3039 } 3040 3041 static int parse_field_name(char *str, char **event, char **field, char **opt) 3042 { 3043 char *event_name, *field_name, *opt_name; 3044 3045 event_name = str; 3046 field_name = strchr(str, '.'); 3047 3048 if (field_name) { 3049 *field_name++ = '\0'; 3050 } else { 3051 event_name = NULL; 3052 field_name = str; 3053 } 3054 3055 opt_name = strchr(field_name, '/'); 3056 if (opt_name) 3057 *opt_name++ = '\0'; 3058 3059 *event = event_name; 3060 *field = field_name; 3061 *opt = opt_name; 3062 3063 return 0; 3064 } 3065 3066 /* find match evsel using a given event name. The event name can be: 3067 * 1. '%' + event index (e.g. '%1' for first event) 3068 * 2. full event name (e.g. sched:sched_switch) 3069 * 3. partial event name (should not contain ':') 3070 */ 3071 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 3072 { 3073 struct evsel *evsel = NULL; 3074 struct evsel *pos; 3075 bool full_name; 3076 3077 /* case 1 */ 3078 if (event_name[0] == '%') { 3079 int nr = strtol(event_name+1, NULL, 0); 3080 3081 if (nr > evlist->core.nr_entries) 3082 return NULL; 3083 3084 evsel = evlist__first(evlist); 3085 while (--nr > 0) 3086 evsel = evsel__next(evsel); 3087 3088 return evsel; 3089 } 3090 3091 full_name = !!strchr(event_name, ':'); 3092 evlist__for_each_entry(evlist, pos) { 3093 /* case 2 */ 3094 if (full_name && evsel__name_is(pos, event_name)) 3095 return pos; 3096 /* case 3 */ 3097 if (!full_name && strstr(pos->name, event_name)) { 3098 if (evsel) { 3099 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 3100 event_name, evsel->name, pos->name); 3101 return NULL; 3102 } 3103 evsel = pos; 3104 } 3105 } 3106 3107 return evsel; 3108 } 3109 3110 #ifdef HAVE_LIBTRACEEVENT 3111 static int __dynamic_dimension__add(struct evsel *evsel, 3112 struct tep_format_field *field, 3113 bool raw_trace, int level) 3114 { 3115 struct hpp_dynamic_entry *hde; 3116 3117 hde = __alloc_dynamic_entry(evsel, field, level); 3118 if (hde == NULL) 3119 return -ENOMEM; 3120 3121 hde->raw_trace = raw_trace; 3122 3123 perf_hpp__register_sort_field(&hde->hpp); 3124 return 0; 3125 } 3126 3127 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 3128 { 3129 int ret; 3130 struct tep_format_field *field; 3131 3132 field = evsel->tp_format->format.fields; 3133 while (field) { 3134 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3135 if (ret < 0) 3136 return ret; 3137 3138 field = field->next; 3139 } 3140 return 0; 3141 } 3142 3143 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 3144 int level) 3145 { 3146 int ret; 3147 struct evsel *evsel; 3148 3149 evlist__for_each_entry(evlist, evsel) { 3150 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3151 continue; 3152 3153 ret = add_evsel_fields(evsel, raw_trace, level); 3154 if (ret < 0) 3155 return ret; 3156 } 3157 return 0; 3158 } 3159 3160 static int add_all_matching_fields(struct evlist *evlist, 3161 char *field_name, bool raw_trace, int level) 3162 { 3163 int ret = -ESRCH; 3164 struct evsel *evsel; 3165 struct tep_format_field *field; 3166 3167 evlist__for_each_entry(evlist, evsel) { 3168 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3169 continue; 3170 3171 field = tep_find_any_field(evsel->tp_format, field_name); 3172 if (field == NULL) 3173 continue; 3174 3175 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3176 if (ret < 0) 3177 break; 3178 } 3179 
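	/*
	 * -ESRCH if no tracepoint event carries the field, otherwise 0 on
	 * success or the first error returned by __dynamic_dimension__add().
	 */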
return ret; 3180 } 3181 #endif /* HAVE_LIBTRACEEVENT */ 3182 3183 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 3184 int level) 3185 { 3186 char *str, *event_name, *field_name, *opt_name; 3187 struct evsel *evsel; 3188 bool raw_trace = symbol_conf.raw_trace; 3189 int ret = 0; 3190 3191 if (evlist == NULL) 3192 return -ENOENT; 3193 3194 str = strdup(tok); 3195 if (str == NULL) 3196 return -ENOMEM; 3197 3198 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 3199 ret = -EINVAL; 3200 goto out; 3201 } 3202 3203 if (opt_name) { 3204 if (strcmp(opt_name, "raw")) { 3205 pr_debug("unsupported field option %s\n", opt_name); 3206 ret = -EINVAL; 3207 goto out; 3208 } 3209 raw_trace = true; 3210 } 3211 3212 #ifdef HAVE_LIBTRACEEVENT 3213 if (!strcmp(field_name, "trace_fields")) { 3214 ret = add_all_dynamic_fields(evlist, raw_trace, level); 3215 goto out; 3216 } 3217 3218 if (event_name == NULL) { 3219 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 3220 goto out; 3221 } 3222 #else 3223 evlist__for_each_entry(evlist, evsel) { 3224 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 3225 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel)); 3226 ret = -ENOTSUP; 3227 } 3228 } 3229 3230 if (ret) { 3231 pr_err("\n"); 3232 goto out; 3233 } 3234 #endif 3235 3236 evsel = find_evsel(evlist, event_name); 3237 if (evsel == NULL) { 3238 pr_debug("Cannot find event: %s\n", event_name); 3239 ret = -ENOENT; 3240 goto out; 3241 } 3242 3243 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3244 pr_debug("%s is not a tracepoint event\n", event_name); 3245 ret = -EINVAL; 3246 goto out; 3247 } 3248 3249 #ifdef HAVE_LIBTRACEEVENT 3250 if (!strcmp(field_name, "*")) { 3251 ret = add_evsel_fields(evsel, raw_trace, level); 3252 } else { 3253 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name); 3254 3255 if (field == NULL) { 3256 pr_debug("Cannot find event field for %s.%s\n", 3257 event_name, field_name); 3258 return -ENOENT; 3259 } 3260 3261 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3262 } 3263 #else 3264 (void)level; 3265 (void)raw_trace; 3266 #endif /* HAVE_LIBTRACEEVENT */ 3267 3268 out: 3269 free(str); 3270 return ret; 3271 } 3272 3273 static int __sort_dimension__add(struct sort_dimension *sd, 3274 struct perf_hpp_list *list, 3275 int level) 3276 { 3277 if (sd->taken) 3278 return 0; 3279 3280 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 3281 return -1; 3282 3283 if (sd->entry->se_collapse) 3284 list->need_collapse = 1; 3285 3286 sd->taken = 1; 3287 3288 return 0; 3289 } 3290 3291 static int __hpp_dimension__add(struct hpp_dimension *hd, 3292 struct perf_hpp_list *list, 3293 int level) 3294 { 3295 struct perf_hpp_fmt *fmt; 3296 3297 if (hd->taken) 3298 return 0; 3299 3300 fmt = __hpp_dimension__alloc_hpp(hd, level); 3301 if (!fmt) 3302 return -1; 3303 3304 hd->taken = 1; 3305 perf_hpp_list__register_sort_field(list, fmt); 3306 return 0; 3307 } 3308 3309 static int __sort_dimension__add_output(struct perf_hpp_list *list, 3310 struct sort_dimension *sd) 3311 { 3312 if (sd->taken) 3313 return 0; 3314 3315 if (__sort_dimension__add_hpp_output(sd, list) < 0) 3316 return -1; 3317 3318 sd->taken = 1; 3319 return 0; 3320 } 3321 3322 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 3323 struct hpp_dimension *hd) 3324 { 3325 struct perf_hpp_fmt *fmt; 3326 3327 if (hd->taken) 3328 return 0; 3329 3330 fmt = 
__hpp_dimension__alloc_hpp(hd, 0); 3331 if (!fmt) 3332 return -1; 3333 3334 hd->taken = 1; 3335 perf_hpp_list__column_register(list, fmt); 3336 return 0; 3337 } 3338 3339 int hpp_dimension__add_output(unsigned col) 3340 { 3341 BUG_ON(col >= PERF_HPP__MAX_INDEX); 3342 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 3343 } 3344 3345 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 3346 struct evlist *evlist, 3347 int level) 3348 { 3349 unsigned int i, j; 3350 3351 /* 3352 * Check to see if there are any arch specific 3353 * sort dimensions not applicable for the current 3354 * architecture. If so, Skip that sort key since 3355 * we don't want to display it in the output fields. 3356 */ 3357 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) { 3358 if (!strcmp(arch_specific_sort_keys[j], tok) && 3359 !arch_support_sort_key(tok)) { 3360 return 0; 3361 } 3362 } 3363 3364 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3365 struct sort_dimension *sd = &common_sort_dimensions[i]; 3366 3367 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3368 continue; 3369 3370 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) { 3371 if (sd->name && !strcmp(dynamic_headers[j], sd->name)) 3372 sort_dimension_add_dynamic_header(sd); 3373 } 3374 3375 if (sd->entry == &sort_parent && parent_pattern) { 3376 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 3377 if (ret) { 3378 char err[BUFSIZ]; 3379 3380 regerror(ret, &parent_regex, err, sizeof(err)); 3381 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 3382 return -EINVAL; 3383 } 3384 list->parent = 1; 3385 } else if (sd->entry == &sort_sym) { 3386 list->sym = 1; 3387 /* 3388 * perf diff displays the performance difference amongst 3389 * two or more perf.data files. Those files could come 3390 * from different binaries. So we should not compare 3391 * their ips, but the name of symbol. 
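 * Hence SORT_MODE__DIFF below switches the collapse callback to
 * sort__sym_sort(), so hist entries are matched by symbol name.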
3392 */ 3393 if (sort__mode == SORT_MODE__DIFF) 3394 sd->entry->se_collapse = sort__sym_sort; 3395 3396 } else if (sd->entry == &sort_dso) { 3397 list->dso = 1; 3398 } else if (sd->entry == &sort_socket) { 3399 list->socket = 1; 3400 } else if (sd->entry == &sort_thread) { 3401 list->thread = 1; 3402 } else if (sd->entry == &sort_comm) { 3403 list->comm = 1; 3404 } else if (sd->entry == &sort_type_offset) { 3405 symbol_conf.annotate_data_member = true; 3406 } 3407 3408 return __sort_dimension__add(sd, list, level); 3409 } 3410 3411 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3412 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3413 3414 if (strncasecmp(tok, hd->name, strlen(tok))) 3415 continue; 3416 3417 return __hpp_dimension__add(hd, list, level); 3418 } 3419 3420 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3421 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3422 3423 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3424 continue; 3425 3426 if (sort__mode != SORT_MODE__BRANCH) 3427 return -EINVAL; 3428 3429 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 3430 list->sym = 1; 3431 3432 __sort_dimension__add(sd, list, level); 3433 return 0; 3434 } 3435 3436 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3437 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3438 3439 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3440 continue; 3441 3442 if (sort__mode != SORT_MODE__MEMORY) 3443 return -EINVAL; 3444 3445 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 3446 return -EINVAL; 3447 3448 if (sd->entry == &sort_mem_daddr_sym) 3449 list->sym = 1; 3450 3451 __sort_dimension__add(sd, list, level); 3452 return 0; 3453 } 3454 3455 if (!add_dynamic_entry(evlist, tok, level)) 3456 return 0; 3457 3458 return -ESRCH; 3459 } 3460 3461 static int setup_sort_list(struct perf_hpp_list *list, char *str, 3462 struct evlist *evlist) 3463 { 3464 char *tmp, *tok; 3465 int ret = 0; 3466 int level = 0; 3467 int next_level = 1; 3468 bool in_group = false; 3469 3470 do { 3471 tok = str; 3472 tmp = strpbrk(str, "{}, "); 3473 if (tmp) { 3474 if (in_group) 3475 next_level = level; 3476 else 3477 next_level = level + 1; 3478 3479 if (*tmp == '{') 3480 in_group = true; 3481 else if (*tmp == '}') 3482 in_group = false; 3483 3484 *tmp = '\0'; 3485 str = tmp + 1; 3486 } 3487 3488 if (*tok) { 3489 ret = sort_dimension__add(list, tok, evlist, level); 3490 if (ret == -EINVAL) { 3491 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 3492 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 3493 else 3494 ui__error("Invalid --sort key: `%s'", tok); 3495 break; 3496 } else if (ret == -ESRCH) { 3497 ui__error("Unknown --sort key: `%s'", tok); 3498 break; 3499 } 3500 } 3501 3502 level = next_level; 3503 } while (tmp); 3504 3505 return ret; 3506 } 3507 3508 static const char *get_default_sort_order(struct evlist *evlist) 3509 { 3510 const char *default_sort_orders[] = { 3511 default_sort_order, 3512 default_branch_sort_order, 3513 default_mem_sort_order, 3514 default_top_sort_order, 3515 default_diff_sort_order, 3516 default_tracepoint_sort_order, 3517 }; 3518 bool use_trace = true; 3519 struct evsel *evsel; 3520 3521 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 3522 3523 if (evlist == NULL || evlist__empty(evlist)) 3524 goto out_no_evlist; 3525 3526 evlist__for_each_entry(evlist, evsel) { 3527 if (evsel->core.attr.type != 
PERF_TYPE_TRACEPOINT) { 3528 use_trace = false; 3529 break; 3530 } 3531 } 3532 3533 if (use_trace) { 3534 sort__mode = SORT_MODE__TRACEPOINT; 3535 if (symbol_conf.raw_trace) 3536 return "trace_fields"; 3537 } 3538 out_no_evlist: 3539 return default_sort_orders[sort__mode]; 3540 } 3541 3542 static int setup_sort_order(struct evlist *evlist) 3543 { 3544 char *new_sort_order; 3545 3546 /* 3547 * Append '+'-prefixed sort order to the default sort 3548 * order string. 3549 */ 3550 if (!sort_order || is_strict_order(sort_order)) 3551 return 0; 3552 3553 if (sort_order[1] == '\0') { 3554 ui__error("Invalid --sort key: `+'"); 3555 return -EINVAL; 3556 } 3557 3558 /* 3559 * We allocate new sort_order string, but we never free it, 3560 * because it's checked over the rest of the code. 3561 */ 3562 if (asprintf(&new_sort_order, "%s,%s", 3563 get_default_sort_order(evlist), sort_order + 1) < 0) { 3564 pr_err("Not enough memory to set up --sort"); 3565 return -ENOMEM; 3566 } 3567 3568 sort_order = new_sort_order; 3569 return 0; 3570 } 3571 3572 /* 3573 * Adds 'pre,' prefix into 'str' is 'pre' is 3574 * not already part of 'str'. 3575 */ 3576 static char *prefix_if_not_in(const char *pre, char *str) 3577 { 3578 char *n; 3579 3580 if (!str || strstr(str, pre)) 3581 return str; 3582 3583 if (asprintf(&n, "%s,%s", pre, str) < 0) 3584 n = NULL; 3585 3586 free(str); 3587 return n; 3588 } 3589 3590 static char *setup_overhead(char *keys) 3591 { 3592 if (sort__mode == SORT_MODE__DIFF) 3593 return keys; 3594 3595 keys = prefix_if_not_in("overhead", keys); 3596 3597 if (symbol_conf.cumulate_callchain) 3598 keys = prefix_if_not_in("overhead_children", keys); 3599 3600 return keys; 3601 } 3602 3603 static int __setup_sorting(struct evlist *evlist) 3604 { 3605 char *str; 3606 const char *sort_keys; 3607 int ret = 0; 3608 3609 ret = setup_sort_order(evlist); 3610 if (ret) 3611 return ret; 3612 3613 sort_keys = sort_order; 3614 if (sort_keys == NULL) { 3615 if (is_strict_order(field_order)) { 3616 /* 3617 * If user specified field order but no sort order, 3618 * we'll honor it and not add default sort orders. 3619 */ 3620 return 0; 3621 } 3622 3623 sort_keys = get_default_sort_order(evlist); 3624 } 3625 3626 str = strdup(sort_keys); 3627 if (str == NULL) { 3628 pr_err("Not enough memory to setup sort keys"); 3629 return -ENOMEM; 3630 } 3631 3632 /* 3633 * Prepend overhead fields for backward compatibility. 
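 * setup_overhead() puts "overhead" in front of the sort keys (and
 * "overhead_children" as well when callchain cumulation is enabled),
 * except for perf diff; the step is skipped entirely when the user
 * pinned a strict (non-'+') --fields order.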
3634 */ 3635 if (!is_strict_order(field_order)) { 3636 str = setup_overhead(str); 3637 if (str == NULL) { 3638 pr_err("Not enough memory to setup overhead keys"); 3639 return -ENOMEM; 3640 } 3641 } 3642 3643 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3644 3645 free(str); 3646 return ret; 3647 } 3648 3649 void perf_hpp__set_elide(int idx, bool elide) 3650 { 3651 struct perf_hpp_fmt *fmt; 3652 struct hpp_sort_entry *hse; 3653 3654 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3655 if (!perf_hpp__is_sort_entry(fmt)) 3656 continue; 3657 3658 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3659 if (hse->se->se_width_idx == idx) { 3660 fmt->elide = elide; 3661 break; 3662 } 3663 } 3664 } 3665 3666 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3667 { 3668 if (list && strlist__nr_entries(list) == 1) { 3669 if (fp != NULL) 3670 fprintf(fp, "# %s: %s\n", list_name, 3671 strlist__entry(list, 0)->s); 3672 return true; 3673 } 3674 return false; 3675 } 3676 3677 static bool get_elide(int idx, FILE *output) 3678 { 3679 switch (idx) { 3680 case HISTC_SYMBOL: 3681 return __get_elide(symbol_conf.sym_list, "symbol", output); 3682 case HISTC_DSO: 3683 return __get_elide(symbol_conf.dso_list, "dso", output); 3684 case HISTC_COMM: 3685 return __get_elide(symbol_conf.comm_list, "comm", output); 3686 default: 3687 break; 3688 } 3689 3690 if (sort__mode != SORT_MODE__BRANCH) 3691 return false; 3692 3693 switch (idx) { 3694 case HISTC_SYMBOL_FROM: 3695 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3696 case HISTC_SYMBOL_TO: 3697 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3698 case HISTC_DSO_FROM: 3699 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3700 case HISTC_DSO_TO: 3701 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3702 case HISTC_ADDR_FROM: 3703 return __get_elide(symbol_conf.sym_from_list, "addr_from", output); 3704 case HISTC_ADDR_TO: 3705 return __get_elide(symbol_conf.sym_to_list, "addr_to", output); 3706 default: 3707 break; 3708 } 3709 3710 return false; 3711 } 3712 3713 void sort__setup_elide(FILE *output) 3714 { 3715 struct perf_hpp_fmt *fmt; 3716 struct hpp_sort_entry *hse; 3717 3718 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3719 if (!perf_hpp__is_sort_entry(fmt)) 3720 continue; 3721 3722 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3723 fmt->elide = get_elide(hse->se->se_width_idx, output); 3724 } 3725 3726 /* 3727 * It makes no sense to elide all of sort entries. 3728 * Just revert them to show up again. 
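 * Otherwise every column would be elided and the report would have
 * nothing left to print.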
3729 */ 3730 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3731 if (!perf_hpp__is_sort_entry(fmt)) 3732 continue; 3733 3734 if (!fmt->elide) 3735 return; 3736 } 3737 3738 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3739 if (!perf_hpp__is_sort_entry(fmt)) 3740 continue; 3741 3742 fmt->elide = false; 3743 } 3744 } 3745 3746 int output_field_add(struct perf_hpp_list *list, char *tok) 3747 { 3748 unsigned int i; 3749 3750 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3751 struct sort_dimension *sd = &common_sort_dimensions[i]; 3752 3753 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3754 continue; 3755 3756 return __sort_dimension__add_output(list, sd); 3757 } 3758 3759 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3760 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3761 3762 if (strncasecmp(tok, hd->name, strlen(tok))) 3763 continue; 3764 3765 return __hpp_dimension__add_output(list, hd); 3766 } 3767 3768 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3769 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3770 3771 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3772 continue; 3773 3774 if (sort__mode != SORT_MODE__BRANCH) 3775 return -EINVAL; 3776 3777 return __sort_dimension__add_output(list, sd); 3778 } 3779 3780 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3781 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3782 3783 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3784 continue; 3785 3786 if (sort__mode != SORT_MODE__MEMORY) 3787 return -EINVAL; 3788 3789 return __sort_dimension__add_output(list, sd); 3790 } 3791 3792 return -ESRCH; 3793 } 3794 3795 static int setup_output_list(struct perf_hpp_list *list, char *str) 3796 { 3797 char *tmp, *tok; 3798 int ret = 0; 3799 3800 for (tok = strtok_r(str, ", ", &tmp); 3801 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3802 ret = output_field_add(list, tok); 3803 if (ret == -EINVAL) { 3804 ui__error("Invalid --fields key: `%s'", tok); 3805 break; 3806 } else if (ret == -ESRCH) { 3807 ui__error("Unknown --fields key: `%s'", tok); 3808 break; 3809 } 3810 } 3811 3812 return ret; 3813 } 3814 3815 void reset_dimensions(void) 3816 { 3817 unsigned int i; 3818 3819 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3820 common_sort_dimensions[i].taken = 0; 3821 3822 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3823 hpp_sort_dimensions[i].taken = 0; 3824 3825 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3826 bstack_sort_dimensions[i].taken = 0; 3827 3828 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3829 memory_sort_dimensions[i].taken = 0; 3830 } 3831 3832 bool is_strict_order(const char *order) 3833 { 3834 return order && (*order != '+'); 3835 } 3836 3837 static int __setup_output_field(void) 3838 { 3839 char *str, *strp; 3840 int ret = -EINVAL; 3841 3842 if (field_order == NULL) 3843 return 0; 3844 3845 strp = str = strdup(field_order); 3846 if (str == NULL) { 3847 pr_err("Not enough memory to setup output fields"); 3848 return -ENOMEM; 3849 } 3850 3851 if (!is_strict_order(field_order)) 3852 strp++; 3853 3854 if (!strlen(strp)) { 3855 ui__error("Invalid --fields key: `+'"); 3856 goto out; 3857 } 3858 3859 ret = setup_output_list(&perf_hpp_list, strp); 3860 3861 out: 3862 free(str); 3863 return ret; 3864 } 3865 3866 int setup_sorting(struct evlist *evlist) 3867 { 3868 int err; 3869 3870 err = __setup_sorting(evlist); 3871 if (err < 0) 3872 return err; 3873 3874 if (parent_pattern != default_parent_pattern) 
{ 3875 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3876 if (err < 0) 3877 return err; 3878 } 3879 3880 reset_dimensions(); 3881 3882 /* 3883 * perf diff doesn't use default hpp output fields. 3884 */ 3885 if (sort__mode != SORT_MODE__DIFF) 3886 perf_hpp__init(); 3887 3888 err = __setup_output_field(); 3889 if (err < 0) 3890 return err; 3891 3892 /* copy sort keys to output fields */ 3893 perf_hpp__setup_output_field(&perf_hpp_list); 3894 /* and then copy output fields to sort keys */ 3895 perf_hpp__append_sort_keys(&perf_hpp_list); 3896 3897 /* setup hists-specific output fields */ 3898 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3899 return -1; 3900 3901 return 0; 3902 } 3903 3904 void reset_output_field(void) 3905 { 3906 perf_hpp_list.need_collapse = 0; 3907 perf_hpp_list.parent = 0; 3908 perf_hpp_list.sym = 0; 3909 perf_hpp_list.dso = 0; 3910 3911 field_order = NULL; 3912 sort_order = NULL; 3913 3914 reset_dimensions(); 3915 perf_hpp__reset_output_field(&perf_hpp_list); 3916 } 3917 3918 #define INDENT (3*8 + 1) 3919 3920 static void add_key(struct strbuf *sb, const char *str, int *llen) 3921 { 3922 if (!str) 3923 return; 3924 3925 if (*llen >= 75) { 3926 strbuf_addstr(sb, "\n\t\t\t "); 3927 *llen = INDENT; 3928 } 3929 strbuf_addf(sb, " %s", str); 3930 *llen += strlen(str) + 1; 3931 } 3932 3933 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3934 int *llen) 3935 { 3936 int i; 3937 3938 for (i = 0; i < n; i++) 3939 add_key(sb, s[i].name, llen); 3940 } 3941 3942 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 3943 int *llen) 3944 { 3945 int i; 3946 3947 for (i = 0; i < n; i++) 3948 add_key(sb, s[i].name, llen); 3949 } 3950 3951 char *sort_help(const char *prefix) 3952 { 3953 struct strbuf sb; 3954 char *s; 3955 int len = strlen(prefix) + INDENT; 3956 3957 strbuf_init(&sb, 300); 3958 strbuf_addstr(&sb, prefix); 3959 add_hpp_sort_string(&sb, hpp_sort_dimensions, 3960 ARRAY_SIZE(hpp_sort_dimensions), &len); 3961 add_sort_string(&sb, common_sort_dimensions, 3962 ARRAY_SIZE(common_sort_dimensions), &len); 3963 add_sort_string(&sb, bstack_sort_dimensions, 3964 ARRAY_SIZE(bstack_sort_dimensions), &len); 3965 add_sort_string(&sb, memory_sort_dimensions, 3966 ARRAY_SIZE(memory_sort_dimensions), &len); 3967 s = strbuf_detach(&sb, NULL); 3968 strbuf_release(&sb); 3969 return s; 3970 } 3971
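/*
 * Usage sketch, not part of the build: the sort and output dimensions set up
 * above are what the tools' --sort/--fields options resolve against.  The
 * command lines below are illustrative assumptions (they presume a matching
 * perf.data, e.g. recorded with -b for the branch keys, or via perf mem and a
 * tracepoint event for the memory and dynamic keys):
 *
 *   perf report --sort comm,dso,symbol                # default_sort_order
 *   perf report -b --sort dso_from,symbol_from,symbol_to,cycles
 *   perf mem report --sort mem,snoop,tlb,dcacheline
 *   perf report --fields overhead,comm,dso            # output_field_add() path
 *   perf report --sort sched:sched_switch.next_comm   # dynamic tracepoint field
 *
 * Key names come from the dimension tables above; see the perf-report
 * documentation for the authoritative option syntax.
 */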