// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "mem-info.h"
#include "annotate.h"
#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have Adjacent Cacheline Prefetch feature, which
 * behaves like the cacheline size is doubled. Enable this flag to
 * check things in double cacheline granularity.
 */
bool chk_double_cl;

/*
 * Replaces all occurrences of the character given with the:
 *
 *	-t, --field-separator
 *
 * option. That option selects a special separator character and disables
 * padding with spaces, so every occurrence of the separator in symbol names
 * (and other output) is replaced with a '.' character, making the separator
 * the only character that cannot appear inside a field.
 */
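/*
 * Example: with "-t ," every field is printed unpadded and a value that
 * would otherwise read "foo,bar" comes out as "foo.bar", so ',' only ever
 * appears as the column separator.
 */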
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return thread__tid(right->thread) - thread__tid(left->thread);
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && !RC_CHK_EQUAL(he->thread, th);
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort simd */

static int64_t
sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (left->simd_flags.arch != right->simd_flags.arch)
		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;

	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
}

static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
{
	u64 arch = simd_flags->arch;

	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
		return "SVE";
	else
		return "n/a";
}

static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
{
	const char *name;

	if (!he->simd_flags.arch)
		return repsep_snprintf(bf, size, "");

	name = hist_entry__get_simd_name(&he->simd_flags);

	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
		return repsep_snprintf(bf, size, "[e] %s", name);
	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
		return repsep_snprintf(bf, size, "[p] %s", name);

	return repsep_snprintf(bf, size, "[.] %s", name);
}

struct sort_entry sort_simd = {
	.se_header	= "Simd ",
	.se_cmp		= sort__simd_cmp,
	.se_snprintf	= hist_entry__simd_snprintf,
	.se_width_idx	= HISTC_SIMD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso__long_name(dso_l);
		dso_name_r = dso__long_name(dso_r);
	} else {
		dso_name_l = dso__short_name(dso_l);
		dso_name_r = dso__short_name(dso_r);
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	const char *dso_name = "[unknown]";

	if (dso)
		dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		struct dso *dso = map ? map__dso(map) : NULL;
		char o = dso ? dso__symtab_origin(dso) : '!';
		u64 rip = ip;

		if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
			rip = map__unmap_ip(map, ip);

		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, rip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort symoff */

static int64_t
sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_cmp(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int64_t
sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = sort__sym_sort(left, right);
	if (ret)
		return ret;

	return left->ip - right->ip;
}

static int
hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;

	if (sym == NULL)
		return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);

	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
}

struct sort_entry sort_sym_offset = {
	.se_header	= "Symbol Offset",
	.se_cmp		= sort__symoff_cmp,
	.se_sort	= sort__symoff_sort,
	.se_snprintf	= hist_entry__symoff_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL_OFFSET,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__addr_cmp(left->ip, right->ip);
	if (ret)
		return ret;

	return sort__dso_cmp(left, right);
}

static int64_t
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int64_t
sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_collapse(left, right);
}

static void
sort__srcline_init(struct hist_entry *he)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_collapse	= sort__srcline_collapse,
	.se_sort	= sort__srcline_sort,
	.se_init	= sort__srcline_init,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->from.addr - right->branch_info->from.addr;
}

static int64_t
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int64_t
sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_from_collapse(left, right);
}

static void sort__srcline_from_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_from)
		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header	= "From Source:Line",
	.se_cmp		= sort__srcline_from_cmp,
	.se_collapse	= sort__srcline_from_collapse,
	.se_sort	= sort__srcline_from_sort,
	.se_init	= sort__srcline_from_init,
	.se_snprintf	= hist_entry__srcline_from_snprintf,
	.se_width_idx	= HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->to.addr - right->branch_info->to.addr;
}

static int64_t
sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int64_t
sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_to_collapse(left, right);
}

static void sort__srcline_to_init(struct hist_entry *he)
{
	if (!he->branch_info->srcline_to)
		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header	= "To Source:Line",
	.se_cmp		= sort__srcline_to_cmp,
	.se_collapse	= sort__srcline_to_collapse,
	.se_sort	= sort__srcline_to_sort,
	.se_init	= sort__srcline_to_init,
	.se_snprintf	= hist_entry__srcline_to_snprintf,
	.se_width_idx	= HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{

	struct symbol *sym = he->ms.sym;
	struct annotated_branch *branch;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	branch = symbol__annotation(sym)->branch;

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header	= "IPC [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header	= "IPC [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (sf == SRCLINE_UNKNOWN)
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcline_cmp(left, right);
}

static int64_t
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int64_t
sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
{
	return sort__srcfile_collapse(left, right);
}

static void sort__srcfile_init(struct hist_entry *he)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}
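/*
 * Note: the srcfile key reuses the srcline lookup above and strips the
 * trailing ":<line>" component in hist_entry__get_srcfile(), so e.g.
 * "util/sort.c:123" is reported as "util/sort.c".
 */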
struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_collapse	= sort__srcfile_collapse,
	.se_sort	= sort__srcfile_sort,
	.se_init	= sort__srcfile_init,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header	= "CPU",
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header	= "cgroup id (dev/inode)",
	.se_cmp		= sort__cgroup_id_cmp,
	.se_snprintf	= hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
};

/* --sort cgroup */

static int64_t
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cgroup - left->cgroup;
}

static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
{
	const char *cgrp_name = "N/A";

	if (he->cgroup) {
		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
						   he->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;
		else
			cgrp_name = "unknown";
	}

	return repsep_snprintf(bf, size, "%s", cgrp_name);
}

struct sort_entry sort_cgroup = {
	.se_header	= "Cgroup",
	.se_cmp		= sort__cgroup_cmp,
	.se_snprintf	= hist_entry__cgroup_snprintf,
	.se_width_idx	= HISTC_CGROUP,
};

/* --sort socket */
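/*
 * Sorts by the processor socket the sampled CPU belongs to, e.g.
 * "perf report --sort socket,comm,dso" groups samples per socket first.
 */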
static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header	= "Time",
	.se_cmp		= sort__time_cmp,
	.se_snprintf	= hist_entry__time_snprintf,
	.se_width_idx	= HISTC_TIME,
};

/* --sort trace */

#ifdef HAVE_LIBTRACEEVENT
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}
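/*
 * The formatted trace line is computed lazily and cached in
 * he->trace_output: both the comparator and the printer below call
 * get_trace_output() only when the field is first needed.
 */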
static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};
#endif /* HAVE_LIBTRACEEVENT */

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       map__dso(he->branch_info->from.ms.map) != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       map__dso(he->branch_info->to.ms.map) != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
						 from->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
						 to->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
				      unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map__unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(to_l->addr, to_r->addr);
}

struct sort_entry sort_addr_from = {
	.se_header	= "Source Address",
	.se_cmp		= sort__addr_from_cmp,
	.se_snprintf	= hist_entry__addr_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx	= HISTC_ADDR_FROM,
};

struct sort_entry sort_addr_to = {
	.se_header	= "Target Address",
	.se_cmp		= sort__addr_to_cmp,
	.se_snprintf	= hist_entry__addr_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx	= HISTC_ADDR_TO,
};


static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->addr;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__daddr(he->mem_info)->addr;
		ms = &mem_info__daddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__iaddr(left->mem_info)->addr;
	if (right->mem_info)
		r = mem_info__iaddr(right->mem_info)->addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = mem_info__iaddr(he->mem_info)->addr;
		ms = &mem_info__iaddr(he->mem_info)->ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = mem_info__daddr(left->mem_info)->ms.map;
	if (right->mem_info)
		map_r = mem_info__daddr(right->mem_info)->ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = mem_info__daddr(he->mem_info)->ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	struct dso *l_dso, *r_dso;
	int rc;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = mem_info__daddr(left->mem_info)->ms.map;
	r_map = mem_info__daddr(right->mem_info)->ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	l_dso = map__dso(l_map);
	r_dso = map__dso(r_map);
	rc = dso__cmp_id(l_dso, r_dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
	    !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
		/* userspace anonymous */

		if (thread__pid(left->thread) > thread__pid(right->thread))
			return -1;
		if (thread__pid(left->thread) < thread__pid(right->thread))
			return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
	r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = mem_info__daddr(he->mem_info)->ms.map;
		struct dso *dso = map ? map__dso(map) : NULL;

		addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
		ms = &mem_info__daddr(he->mem_info)->ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map__prot(map) & PROT_EXEC) &&
		    (map__flags(map) & MAP_SHARED) &&
		    (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
		     dso__id(dso)->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight - right->weight;
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width,
			       he->weight * he->stat.nr_events);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->ins_lat - right->ins_lat;
}

static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}

struct sort_entry sort_local_ins_lat = {
	.se_header	= "Local INSTR Latency",
	.se_cmp		= sort__ins_lat_cmp,
	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
	.se_width_idx	= HISTC_LOCAL_INS_LAT,
};

static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->ins_lat * he->stat.nr_events);
}

struct sort_entry sort_global_ins_lat = {
	.se_header	= "INSTR Latency",
	.se_cmp		= sort__ins_lat_cmp,
	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
};

static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->p_stage_cyc - right->p_stage_cyc;
}

static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
						   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->p_stage_cyc * he->stat.nr_events);
}


static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
}
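/*
 * Pipeline stage cycles are an architecture-specific sample field (see
 * arch_specific_sort_keys[] above); as with weight and ins_lat, the
 * global variant scales the per-sample value by the number of merged
 * events (he->stat.nr_events).
 */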
struct sort_entry sort_local_p_stage_cyc = {
	.se_header	= "Local Pipeline Stage Cycle",
	.se_cmp		= sort__p_stage_cyc_cmp,
	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
};

struct sort_entry sort_global_p_stage_cyc = {
	.se_header	= "Pipeline Stage Cycle",
	.se_cmp		= sort__p_stage_cyc_cmp,
	.se_snprintf	= hist_entry__global_p_stage_cyc_snprintf,
	.se_width_idx	= HISTC_GLOBAL_P_STAGE_CYC,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = *mem_info__data_src(left->mem_info);
	else
		data_src_l.mem_blk = PERF_MEM_BLK_NA;

	if (right->mem_info)
		data_src_r = *mem_info__data_src(right->mem_info);
	else
		data_src_r.mem_blk = PERF_MEM_BLK_NA;

	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
}

static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	char out[16];

	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

struct sort_entry sort_mem_blocked = {
	.se_header	= "Blocked",
	.se_cmp		= sort__blocked_cmp,
	.se_snprintf	= hist_entry__blocked_snprintf,
	.se_width_idx	= HISTC_MEM_BLOCKED,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->phys_addr;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->phys_addr;

	return (int64_t)(r - l);
}
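/*
 * Unlike most columns, this one always reports exactly 'width' characters,
 * padding with spaces and truncating the formatted address if needed.
 */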
static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = mem_info__daddr(he->mem_info)->phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header	= "Data Physical Address",
	.se_cmp		= sort__phys_daddr_cmp,
	.se_snprintf	= hist_entry__phys_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = mem_info__daddr(left->mem_info)->data_page_size;
	if (right->mem_info)
		r = mem_info__daddr(right->mem_info)->data_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
}

struct sort_entry sort_mem_data_page_size = {
	.se_header	= "Data Page Size",
	.se_cmp		= sort__data_page_size_cmp,
	.se_snprintf	= hist_entry__data_page_size_snprintf,
	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
};

static int64_t
sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = left->code_page_size;
	uint64_t r = right->code_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(he->code_page_size, str));
}

struct sort_entry sort_code_page_size = {
	.se_header	= "Code Page Size",
	.se_cmp		= sort__code_page_size_cmp,
	.se_snprintf	= hist_entry__code_page_size_snprintf,
	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);
cmp_null(left->branch_info, right->branch_info); 1969 1970 return left->branch_info->flags.in_tx != 1971 right->branch_info->flags.in_tx; 1972 } 1973 1974 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1975 size_t size, unsigned int width) 1976 { 1977 static const char *out = "N/A"; 1978 1979 if (he->branch_info) { 1980 if (he->branch_info->flags.in_tx) 1981 out = "T"; 1982 else 1983 out = "."; 1984 } 1985 1986 return repsep_snprintf(bf, size, "%-*s", width, out); 1987 } 1988 1989 struct sort_entry sort_in_tx = { 1990 .se_header = "Branch in transaction", 1991 .se_cmp = sort__in_tx_cmp, 1992 .se_snprintf = hist_entry__in_tx_snprintf, 1993 .se_width_idx = HISTC_IN_TX, 1994 }; 1995 1996 static int64_t 1997 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1998 { 1999 return left->transaction - right->transaction; 2000 } 2001 2002 static inline char *add_str(char *p, const char *str) 2003 { 2004 strcpy(p, str); 2005 return p + strlen(str); 2006 } 2007 2008 static struct txbit { 2009 unsigned flag; 2010 const char *name; 2011 int skip_for_len; 2012 } txbits[] = { 2013 { PERF_TXN_ELISION, "EL ", 0 }, 2014 { PERF_TXN_TRANSACTION, "TX ", 1 }, 2015 { PERF_TXN_SYNC, "SYNC ", 1 }, 2016 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 2017 { PERF_TXN_RETRY, "RETRY ", 0 }, 2018 { PERF_TXN_CONFLICT, "CON ", 0 }, 2019 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 2020 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 2021 { 0, NULL, 0 } 2022 }; 2023 2024 int hist_entry__transaction_len(void) 2025 { 2026 int i; 2027 int len = 0; 2028 2029 for (i = 0; txbits[i].name; i++) { 2030 if (!txbits[i].skip_for_len) 2031 len += strlen(txbits[i].name); 2032 } 2033 len += 4; /* :XX<space> */ 2034 return len; 2035 } 2036 2037 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 2038 size_t size, unsigned int width) 2039 { 2040 u64 t = he->transaction; 2041 char buf[128]; 2042 char *p = buf; 2043 int i; 2044 2045 buf[0] = 0; 2046 for (i = 0; txbits[i].name; i++) 2047 if (txbits[i].flag & t) 2048 p = add_str(p, txbits[i].name); 2049 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 2050 p = add_str(p, "NEITHER "); 2051 if (t & PERF_TXN_ABORT_MASK) { 2052 sprintf(p, ":%" PRIx64, 2053 (t & PERF_TXN_ABORT_MASK) >> 2054 PERF_TXN_ABORT_SHIFT); 2055 p += strlen(p); 2056 } 2057 2058 return repsep_snprintf(bf, size, "%-*s", width, buf); 2059 } 2060 2061 struct sort_entry sort_transaction = { 2062 .se_header = "Transaction ", 2063 .se_cmp = sort__transaction_cmp, 2064 .se_snprintf = hist_entry__transaction_snprintf, 2065 .se_width_idx = HISTC_TRANSACTION, 2066 }; 2067 2068 /* --sort symbol_size */ 2069 2070 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 2071 { 2072 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 2073 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 2074 2075 return size_l < size_r ? -1 : 2076 size_l == size_r ? 
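	/*
	 * Explicit three-way comparison: a plain size_l - size_r could wrap
	 * around when the two sizes differ by more than an int64_t return
	 * value can represent.
	 */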
0 : 1; 2077 } 2078 2079 static int64_t 2080 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 2081 { 2082 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 2083 } 2084 2085 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 2086 size_t bf_size, unsigned int width) 2087 { 2088 if (sym) 2089 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 2090 2091 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2092 } 2093 2094 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 2095 size_t size, unsigned int width) 2096 { 2097 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 2098 } 2099 2100 struct sort_entry sort_sym_size = { 2101 .se_header = "Symbol size", 2102 .se_cmp = sort__sym_size_cmp, 2103 .se_snprintf = hist_entry__sym_size_snprintf, 2104 .se_width_idx = HISTC_SYM_SIZE, 2105 }; 2106 2107 /* --sort dso_size */ 2108 2109 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 2110 { 2111 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 2112 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 2113 2114 return size_l < size_r ? -1 : 2115 size_l == size_r ? 0 : 1; 2116 } 2117 2118 static int64_t 2119 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 2120 { 2121 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 2122 } 2123 2124 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 2125 size_t bf_size, unsigned int width) 2126 { 2127 if (map && map__dso(map)) 2128 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map)); 2129 2130 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2131 } 2132 2133 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 2134 size_t size, unsigned int width) 2135 { 2136 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 2137 } 2138 2139 struct sort_entry sort_dso_size = { 2140 .se_header = "DSO size", 2141 .se_cmp = sort__dso_size_cmp, 2142 .se_snprintf = hist_entry__dso_size_snprintf, 2143 .se_width_idx = HISTC_DSO_SIZE, 2144 }; 2145 2146 /* --sort addr */ 2147 2148 static int64_t 2149 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right) 2150 { 2151 u64 left_ip = left->ip; 2152 u64 right_ip = right->ip; 2153 struct map *left_map = left->ms.map; 2154 struct map *right_map = right->ms.map; 2155 2156 if (left_map) 2157 left_ip = map__unmap_ip(left_map, left_ip); 2158 if (right_map) 2159 right_ip = map__unmap_ip(right_map, right_ip); 2160 2161 return _sort__addr_cmp(left_ip, right_ip); 2162 } 2163 2164 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf, 2165 size_t size, unsigned int width) 2166 { 2167 u64 ip = he->ip; 2168 struct map *map = he->ms.map; 2169 2170 if (map) 2171 ip = map__unmap_ip(map, ip); 2172 2173 return repsep_snprintf(bf, size, "%-#*llx", width, ip); 2174 } 2175 2176 struct sort_entry sort_addr = { 2177 .se_header = "Address", 2178 .se_cmp = sort__addr_cmp, 2179 .se_snprintf = hist_entry__addr_snprintf, 2180 .se_width_idx = HISTC_ADDR, 2181 }; 2182 2183 /* --sort type */ 2184 2185 struct annotated_data_type unknown_type = { 2186 .self = { 2187 .type_name = (char *)"(unknown)", 2188 .children = LIST_HEAD_INIT(unknown_type.self.children), 2189 }, 2190 }; 2191 2192 static int64_t 2193 sort__type_cmp(struct hist_entry *left, struct hist_entry *right) 2194 { 2195 return sort__addr_cmp(left, right); 2196 } 2197 2198 static void sort__type_init(struct hist_entry *he) 2199 { 2200 if 
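	/*
	 * Lazy resolution of the data type: keep an already resolved
	 * he->mem_type, otherwise look it up from the debug info and fall
	 * back to unknown_type (at offset 0) when nothing is found.
	 */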
(he->mem_type) 2201 return; 2202 2203 he->mem_type = hist_entry__get_data_type(he); 2204 if (he->mem_type == NULL) { 2205 he->mem_type = &unknown_type; 2206 he->mem_type_off = 0; 2207 } 2208 } 2209 2210 static int64_t 2211 sort__type_collapse(struct hist_entry *left, struct hist_entry *right) 2212 { 2213 struct annotated_data_type *left_type = left->mem_type; 2214 struct annotated_data_type *right_type = right->mem_type; 2215 2216 if (!left_type) { 2217 sort__type_init(left); 2218 left_type = left->mem_type; 2219 } 2220 2221 if (!right_type) { 2222 sort__type_init(right); 2223 right_type = right->mem_type; 2224 } 2225 2226 return strcmp(left_type->self.type_name, right_type->self.type_name); 2227 } 2228 2229 static int64_t 2230 sort__type_sort(struct hist_entry *left, struct hist_entry *right) 2231 { 2232 return sort__type_collapse(left, right); 2233 } 2234 2235 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf, 2236 size_t size, unsigned int width) 2237 { 2238 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name); 2239 } 2240 2241 struct sort_entry sort_type = { 2242 .se_header = "Data Type", 2243 .se_cmp = sort__type_cmp, 2244 .se_collapse = sort__type_collapse, 2245 .se_sort = sort__type_sort, 2246 .se_init = sort__type_init, 2247 .se_snprintf = hist_entry__type_snprintf, 2248 .se_width_idx = HISTC_TYPE, 2249 }; 2250 2251 /* --sort typeoff */ 2252 2253 static int64_t 2254 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right) 2255 { 2256 struct annotated_data_type *left_type = left->mem_type; 2257 struct annotated_data_type *right_type = right->mem_type; 2258 int64_t ret; 2259 2260 if (!left_type) { 2261 sort__type_init(left); 2262 left_type = left->mem_type; 2263 } 2264 2265 if (!right_type) { 2266 sort__type_init(right); 2267 right_type = right->mem_type; 2268 } 2269 2270 ret = strcmp(left_type->self.type_name, right_type->self.type_name); 2271 if (ret) 2272 return ret; 2273 return left->mem_type_off - right->mem_type_off; 2274 } 2275 2276 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m, 2277 int offset, bool first) 2278 { 2279 struct annotated_member *child; 2280 2281 if (list_empty(&m->children)) 2282 return; 2283 2284 list_for_each_entry(child, &m->children, node) { 2285 if (child->offset <= offset && offset < child->offset + child->size) { 2286 int len = 0; 2287 2288 /* It can have anonymous struct/union members */ 2289 if (child->var_name) { 2290 len = scnprintf(buf, sz, "%s%s", 2291 first ? 
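			/*
			 * Build a dotted member path like "a.b.c": no
			 * separator before the first named component, a '.'
			 * before every later one.  Anonymous struct/union
			 * members have no var_name, so they add nothing here
			 * and the recursion below just descends into them.
			 */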
"" : ".", child->var_name); 2292 first = false; 2293 } 2294 2295 fill_member_name(buf + len, sz - len, child, offset, first); 2296 return; 2297 } 2298 } 2299 } 2300 2301 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf, 2302 size_t size, unsigned int width __maybe_unused) 2303 { 2304 struct annotated_data_type *he_type = he->mem_type; 2305 char buf[4096]; 2306 2307 buf[0] = '\0'; 2308 if (list_empty(&he_type->self.children)) 2309 snprintf(buf, sizeof(buf), "no field"); 2310 else 2311 fill_member_name(buf, sizeof(buf), &he_type->self, 2312 he->mem_type_off, true); 2313 buf[4095] = '\0'; 2314 2315 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name, 2316 he->mem_type_off, buf); 2317 } 2318 2319 struct sort_entry sort_type_offset = { 2320 .se_header = "Data Type Offset", 2321 .se_cmp = sort__type_cmp, 2322 .se_collapse = sort__typeoff_sort, 2323 .se_sort = sort__typeoff_sort, 2324 .se_init = sort__type_init, 2325 .se_snprintf = hist_entry__typeoff_snprintf, 2326 .se_width_idx = HISTC_TYPE_OFFSET, 2327 }; 2328 2329 /* --sort typecln */ 2330 2331 /* TODO: use actual value in the system */ 2332 #define TYPE_CACHELINE_SIZE 64 2333 2334 static int64_t 2335 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right) 2336 { 2337 struct annotated_data_type *left_type = left->mem_type; 2338 struct annotated_data_type *right_type = right->mem_type; 2339 int64_t left_cln, right_cln; 2340 int64_t ret; 2341 2342 if (!left_type) { 2343 sort__type_init(left); 2344 left_type = left->mem_type; 2345 } 2346 2347 if (!right_type) { 2348 sort__type_init(right); 2349 right_type = right->mem_type; 2350 } 2351 2352 ret = strcmp(left_type->self.type_name, right_type->self.type_name); 2353 if (ret) 2354 return ret; 2355 2356 left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE; 2357 right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE; 2358 return left_cln - right_cln; 2359 } 2360 2361 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf, 2362 size_t size, unsigned int width __maybe_unused) 2363 { 2364 struct annotated_data_type *he_type = he->mem_type; 2365 2366 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name, 2367 he->mem_type_off / TYPE_CACHELINE_SIZE); 2368 } 2369 2370 struct sort_entry sort_type_cacheline = { 2371 .se_header = "Data Type Cacheline", 2372 .se_cmp = sort__type_cmp, 2373 .se_collapse = sort__typecln_sort, 2374 .se_sort = sort__typecln_sort, 2375 .se_init = sort__type_init, 2376 .se_snprintf = hist_entry__typecln_snprintf, 2377 .se_width_idx = HISTC_TYPE_CACHELINE, 2378 }; 2379 2380 2381 struct sort_dimension { 2382 const char *name; 2383 struct sort_entry *entry; 2384 int taken; 2385 }; 2386 2387 int __weak arch_support_sort_key(const char *sort_key __maybe_unused) 2388 { 2389 return 0; 2390 } 2391 2392 const char * __weak arch_perf_header_entry(const char *se_header) 2393 { 2394 return se_header; 2395 } 2396 2397 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd) 2398 { 2399 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header); 2400 } 2401 2402 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 2403 2404 static struct sort_dimension common_sort_dimensions[] = { 2405 DIM(SORT_PID, "pid", sort_thread), 2406 DIM(SORT_COMM, "comm", sort_comm), 2407 DIM(SORT_DSO, "dso", sort_dso), 2408 DIM(SORT_SYM, "symbol", sort_sym), 2409 DIM(SORT_PARENT, "parent", sort_parent), 2410 DIM(SORT_CPU, "cpu", sort_cpu), 2411 DIM(SORT_SOCKET, "socket", sort_socket), 2412 
DIM(SORT_SRCLINE, "srcline", sort_srcline), 2413 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 2414 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 2415 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 2416 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 2417 #ifdef HAVE_LIBTRACEEVENT 2418 DIM(SORT_TRACE, "trace", sort_trace), 2419 #endif 2420 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 2421 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 2422 DIM(SORT_CGROUP, "cgroup", sort_cgroup), 2423 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 2424 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 2425 DIM(SORT_TIME, "time", sort_time), 2426 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size), 2427 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat), 2428 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat), 2429 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc), 2430 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc), 2431 DIM(SORT_ADDR, "addr", sort_addr), 2432 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc), 2433 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc), 2434 DIM(SORT_SIMD, "simd", sort_simd), 2435 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type), 2436 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset), 2437 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset), 2438 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline), 2439 }; 2440 2441 #undef DIM 2442 2443 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 2444 2445 static struct sort_dimension bstack_sort_dimensions[] = { 2446 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 2447 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 2448 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 2449 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 2450 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 2451 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 2452 DIM(SORT_ABORT, "abort", sort_abort), 2453 DIM(SORT_CYCLES, "cycles", sort_cycles), 2454 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 2455 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 2456 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 2457 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from), 2458 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to), 2459 }; 2460 2461 #undef DIM 2462 2463 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 2464 2465 static struct sort_dimension memory_sort_dimensions[] = { 2466 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 2467 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 2468 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 2469 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 2470 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 2471 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 2472 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 2473 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 2474 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 2475 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size), 2476 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked), 2477 }; 2478 2479 #undef DIM 2480 2481 struct hpp_dimension { 2482 const char *name; 2483 struct perf_hpp_fmt *fmt; 2484 int taken; 2485 }; 2486 2487 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 2488 2489 static struct hpp_dimension hpp_sort_dimensions[] = { 2490 DIM(PERF_HPP__OVERHEAD, "overhead"), 
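	/*
	 * The overhead_* variants below split the overhead percentage by CPU
	 * mode (kernel/user and guest kernel/user), while "overhead_children"
	 * is the accumulated overhead that includes callees when callchain
	 * cumulation (--children) is enabled.
	 */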
2491 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 2492 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 2493 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 2494 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 2495 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 2496 DIM(PERF_HPP__SAMPLES, "sample"), 2497 DIM(PERF_HPP__PERIOD, "period"), 2498 DIM(PERF_HPP__WEIGHT1, "weight1"), 2499 DIM(PERF_HPP__WEIGHT2, "weight2"), 2500 DIM(PERF_HPP__WEIGHT3, "weight3"), 2501 /* aliases for weight_struct */ 2502 DIM(PERF_HPP__WEIGHT2, "ins_lat"), 2503 DIM(PERF_HPP__WEIGHT3, "retire_lat"), 2504 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"), 2505 }; 2506 2507 #undef DIM 2508 2509 struct hpp_sort_entry { 2510 struct perf_hpp_fmt hpp; 2511 struct sort_entry *se; 2512 }; 2513 2514 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 2515 { 2516 struct hpp_sort_entry *hse; 2517 2518 if (!perf_hpp__is_sort_entry(fmt)) 2519 return; 2520 2521 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2522 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 2523 } 2524 2525 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2526 struct hists *hists, int line __maybe_unused, 2527 int *span __maybe_unused) 2528 { 2529 struct hpp_sort_entry *hse; 2530 size_t len = fmt->user_len; 2531 2532 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2533 2534 if (!len) 2535 len = hists__col_len(hists, hse->se->se_width_idx); 2536 2537 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 2538 } 2539 2540 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 2541 struct perf_hpp *hpp __maybe_unused, 2542 struct hists *hists) 2543 { 2544 struct hpp_sort_entry *hse; 2545 size_t len = fmt->user_len; 2546 2547 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2548 2549 if (!len) 2550 len = hists__col_len(hists, hse->se->se_width_idx); 2551 2552 return len; 2553 } 2554 2555 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2556 struct hist_entry *he) 2557 { 2558 struct hpp_sort_entry *hse; 2559 size_t len = fmt->user_len; 2560 2561 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2562 2563 if (!len) 2564 len = hists__col_len(he->hists, hse->se->se_width_idx); 2565 2566 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 2567 } 2568 2569 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 2570 struct hist_entry *a, struct hist_entry *b) 2571 { 2572 struct hpp_sort_entry *hse; 2573 2574 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2575 return hse->se->se_cmp(a, b); 2576 } 2577 2578 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 2579 struct hist_entry *a, struct hist_entry *b) 2580 { 2581 struct hpp_sort_entry *hse; 2582 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 2583 2584 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2585 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 2586 return collapse_fn(a, b); 2587 } 2588 2589 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 2590 struct hist_entry *a, struct hist_entry *b) 2591 { 2592 struct hpp_sort_entry *hse; 2593 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 2594 2595 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2596 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 2597 return sort_fn(a, b); 2598 } 2599 2600 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 2601 { 2602 return format->header == __sort__hpp_header; 2603 } 2604 2605 #define MK_SORT_ENTRY_CHK(key) \ 
2606 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 2607 { \ 2608 struct hpp_sort_entry *hse; \ 2609 \ 2610 if (!perf_hpp__is_sort_entry(fmt)) \ 2611 return false; \ 2612 \ 2613 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 2614 return hse->se == &sort_ ## key ; \ 2615 } 2616 2617 #ifdef HAVE_LIBTRACEEVENT 2618 MK_SORT_ENTRY_CHK(trace) 2619 #else 2620 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2621 { 2622 return false; 2623 } 2624 #endif 2625 MK_SORT_ENTRY_CHK(srcline) 2626 MK_SORT_ENTRY_CHK(srcfile) 2627 MK_SORT_ENTRY_CHK(thread) 2628 MK_SORT_ENTRY_CHK(comm) 2629 MK_SORT_ENTRY_CHK(dso) 2630 MK_SORT_ENTRY_CHK(sym) 2631 2632 2633 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2634 { 2635 struct hpp_sort_entry *hse_a; 2636 struct hpp_sort_entry *hse_b; 2637 2638 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 2639 return false; 2640 2641 hse_a = container_of(a, struct hpp_sort_entry, hpp); 2642 hse_b = container_of(b, struct hpp_sort_entry, hpp); 2643 2644 return hse_a->se == hse_b->se; 2645 } 2646 2647 static void hse_free(struct perf_hpp_fmt *fmt) 2648 { 2649 struct hpp_sort_entry *hse; 2650 2651 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2652 free(hse); 2653 } 2654 2655 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2656 { 2657 struct hpp_sort_entry *hse; 2658 2659 if (!perf_hpp__is_sort_entry(fmt)) 2660 return; 2661 2662 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2663 2664 if (hse->se->se_init) 2665 hse->se->se_init(he); 2666 } 2667 2668 static struct hpp_sort_entry * 2669 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 2670 { 2671 struct hpp_sort_entry *hse; 2672 2673 hse = malloc(sizeof(*hse)); 2674 if (hse == NULL) { 2675 pr_err("Memory allocation failed\n"); 2676 return NULL; 2677 } 2678 2679 hse->se = sd->entry; 2680 hse->hpp.name = sd->entry->se_header; 2681 hse->hpp.header = __sort__hpp_header; 2682 hse->hpp.width = __sort__hpp_width; 2683 hse->hpp.entry = __sort__hpp_entry; 2684 hse->hpp.color = NULL; 2685 2686 hse->hpp.cmp = __sort__hpp_cmp; 2687 hse->hpp.collapse = __sort__hpp_collapse; 2688 hse->hpp.sort = __sort__hpp_sort; 2689 hse->hpp.equal = __sort__hpp_equal; 2690 hse->hpp.free = hse_free; 2691 hse->hpp.init = hse_init; 2692 2693 INIT_LIST_HEAD(&hse->hpp.list); 2694 INIT_LIST_HEAD(&hse->hpp.sort_list); 2695 hse->hpp.elide = false; 2696 hse->hpp.len = 0; 2697 hse->hpp.user_len = 0; 2698 hse->hpp.level = level; 2699 2700 return hse; 2701 } 2702 2703 static void hpp_free(struct perf_hpp_fmt *fmt) 2704 { 2705 free(fmt); 2706 } 2707 2708 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 2709 int level) 2710 { 2711 struct perf_hpp_fmt *fmt; 2712 2713 fmt = memdup(hd->fmt, sizeof(*fmt)); 2714 if (fmt) { 2715 INIT_LIST_HEAD(&fmt->list); 2716 INIT_LIST_HEAD(&fmt->sort_list); 2717 fmt->free = hpp_free; 2718 fmt->level = level; 2719 } 2720 2721 return fmt; 2722 } 2723 2724 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 2725 { 2726 struct perf_hpp_fmt *fmt; 2727 struct hpp_sort_entry *hse; 2728 int ret = -1; 2729 int r; 2730 2731 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 2732 if (!perf_hpp__is_sort_entry(fmt)) 2733 continue; 2734 2735 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2736 if (hse->se->se_filter == NULL) 2737 continue; 2738 2739 /* 2740 * hist entry is filtered if any of sort key in the hpp list 2741 * is applied. 
But it should skip non-matched filter types. 2742 */ 2743 r = hse->se->se_filter(he, type, arg); 2744 if (r >= 0) { 2745 if (ret < 0) 2746 ret = 0; 2747 ret |= r; 2748 } 2749 } 2750 2751 return ret; 2752 } 2753 2754 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 2755 struct perf_hpp_list *list, 2756 int level) 2757 { 2758 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 2759 2760 if (hse == NULL) 2761 return -1; 2762 2763 perf_hpp_list__register_sort_field(list, &hse->hpp); 2764 return 0; 2765 } 2766 2767 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 2768 struct perf_hpp_list *list) 2769 { 2770 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 2771 2772 if (hse == NULL) 2773 return -1; 2774 2775 perf_hpp_list__column_register(list, &hse->hpp); 2776 return 0; 2777 } 2778 2779 #ifndef HAVE_LIBTRACEEVENT 2780 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2781 { 2782 return false; 2783 } 2784 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused, 2785 struct hists *hists __maybe_unused) 2786 { 2787 return false; 2788 } 2789 #else 2790 struct hpp_dynamic_entry { 2791 struct perf_hpp_fmt hpp; 2792 struct evsel *evsel; 2793 struct tep_format_field *field; 2794 unsigned dynamic_len; 2795 bool raw_trace; 2796 }; 2797 2798 static int hde_width(struct hpp_dynamic_entry *hde) 2799 { 2800 if (!hde->hpp.len) { 2801 int len = hde->dynamic_len; 2802 int namelen = strlen(hde->field->name); 2803 int fieldlen = hde->field->size; 2804 2805 if (namelen > len) 2806 len = namelen; 2807 2808 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2809 /* length for print hex numbers */ 2810 fieldlen = hde->field->size * 2 + 2; 2811 } 2812 if (fieldlen > len) 2813 len = fieldlen; 2814 2815 hde->hpp.len = len; 2816 } 2817 return hde->hpp.len; 2818 } 2819 2820 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2821 struct hist_entry *he) 2822 { 2823 char *str, *pos; 2824 struct tep_format_field *field = hde->field; 2825 size_t namelen; 2826 bool last = false; 2827 2828 if (hde->raw_trace) 2829 return; 2830 2831 /* parse pretty print result and update max length */ 2832 if (!he->trace_output) 2833 he->trace_output = get_trace_output(he); 2834 2835 namelen = strlen(field->name); 2836 str = he->trace_output; 2837 2838 while (str) { 2839 pos = strchr(str, ' '); 2840 if (pos == NULL) { 2841 last = true; 2842 pos = str + strlen(str); 2843 } 2844 2845 if (!strncmp(str, field->name, namelen)) { 2846 size_t len; 2847 2848 str += namelen + 1; 2849 len = pos - str; 2850 2851 if (len > hde->dynamic_len) 2852 hde->dynamic_len = len; 2853 break; 2854 } 2855 2856 if (last) 2857 str = NULL; 2858 else 2859 str = pos + 1; 2860 } 2861 } 2862 2863 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2864 struct hists *hists __maybe_unused, 2865 int line __maybe_unused, 2866 int *span __maybe_unused) 2867 { 2868 struct hpp_dynamic_entry *hde; 2869 size_t len = fmt->user_len; 2870 2871 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2872 2873 if (!len) 2874 len = hde_width(hde); 2875 2876 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2877 } 2878 2879 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2880 struct perf_hpp *hpp __maybe_unused, 2881 struct hists *hists __maybe_unused) 2882 { 2883 struct hpp_dynamic_entry *hde; 2884 size_t len = fmt->user_len; 2885 2886 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2887 2888 if 
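	/*
	 * No user supplied width: fall back to hde_width(), i.e. the widest
	 * of the field name, the longest pretty-printed value seen so far
	 * (dynamic_len) and, for non-string fields, the hex form of the raw
	 * value (two characters per byte plus "0x").
	 */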
(!len) 2889 len = hde_width(hde); 2890 2891 return len; 2892 } 2893 2894 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2895 { 2896 struct hpp_dynamic_entry *hde; 2897 2898 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2899 2900 return hists_to_evsel(hists) == hde->evsel; 2901 } 2902 2903 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2904 struct hist_entry *he) 2905 { 2906 struct hpp_dynamic_entry *hde; 2907 size_t len = fmt->user_len; 2908 char *str, *pos; 2909 struct tep_format_field *field; 2910 size_t namelen; 2911 bool last = false; 2912 int ret; 2913 2914 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2915 2916 if (!len) 2917 len = hde_width(hde); 2918 2919 if (hde->raw_trace) 2920 goto raw_field; 2921 2922 if (!he->trace_output) 2923 he->trace_output = get_trace_output(he); 2924 2925 field = hde->field; 2926 namelen = strlen(field->name); 2927 str = he->trace_output; 2928 2929 while (str) { 2930 pos = strchr(str, ' '); 2931 if (pos == NULL) { 2932 last = true; 2933 pos = str + strlen(str); 2934 } 2935 2936 if (!strncmp(str, field->name, namelen)) { 2937 str += namelen + 1; 2938 str = strndup(str, pos - str); 2939 2940 if (str == NULL) 2941 return scnprintf(hpp->buf, hpp->size, 2942 "%*.*s", len, len, "ERROR"); 2943 break; 2944 } 2945 2946 if (last) 2947 str = NULL; 2948 else 2949 str = pos + 1; 2950 } 2951 2952 if (str == NULL) { 2953 struct trace_seq seq; 2954 raw_field: 2955 trace_seq_init(&seq); 2956 tep_print_field(&seq, he->raw_data, hde->field); 2957 str = seq.buffer; 2958 } 2959 2960 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2961 free(str); 2962 return ret; 2963 } 2964 2965 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2966 struct hist_entry *a, struct hist_entry *b) 2967 { 2968 struct hpp_dynamic_entry *hde; 2969 struct tep_format_field *field; 2970 unsigned offset, size; 2971 2972 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2973 2974 field = hde->field; 2975 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2976 unsigned long long dyn; 2977 2978 tep_read_number_field(field, a->raw_data, &dyn); 2979 offset = dyn & 0xffff; 2980 size = (dyn >> 16) & 0xffff; 2981 if (tep_field_is_relative(field->flags)) 2982 offset += field->offset + field->size; 2983 /* record max width for output */ 2984 if (size > hde->dynamic_len) 2985 hde->dynamic_len = size; 2986 } else { 2987 offset = field->offset; 2988 size = field->size; 2989 } 2990 2991 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2992 } 2993 2994 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2995 { 2996 return fmt->cmp == __sort__hde_cmp; 2997 } 2998 2999 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 3000 { 3001 struct hpp_dynamic_entry *hde_a; 3002 struct hpp_dynamic_entry *hde_b; 3003 3004 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 3005 return false; 3006 3007 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 3008 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 3009 3010 return hde_a->field == hde_b->field; 3011 } 3012 3013 static void hde_free(struct perf_hpp_fmt *fmt) 3014 { 3015 struct hpp_dynamic_entry *hde; 3016 3017 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 3018 free(hde); 3019 } 3020 3021 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 3022 { 3023 struct hpp_dynamic_entry *hde; 3024 3025 if (!perf_hpp__is_dynamic_entry(fmt)) 3026 return; 3027 3028 hde = 
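	/*
	 * Per-entry init hook for a dynamic column: let update_dynamic_len()
	 * widen hde->dynamic_len to fit this entry's pretty-printed field
	 * value (skipped for raw trace output).
	 */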
container_of(fmt, struct hpp_dynamic_entry, hpp); 3029 update_dynamic_len(hde, he); 3030 } 3031 3032 static struct hpp_dynamic_entry * 3033 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 3034 int level) 3035 { 3036 struct hpp_dynamic_entry *hde; 3037 3038 hde = malloc(sizeof(*hde)); 3039 if (hde == NULL) { 3040 pr_debug("Memory allocation failed\n"); 3041 return NULL; 3042 } 3043 3044 hde->evsel = evsel; 3045 hde->field = field; 3046 hde->dynamic_len = 0; 3047 3048 hde->hpp.name = field->name; 3049 hde->hpp.header = __sort__hde_header; 3050 hde->hpp.width = __sort__hde_width; 3051 hde->hpp.entry = __sort__hde_entry; 3052 hde->hpp.color = NULL; 3053 3054 hde->hpp.init = __sort__hde_init; 3055 hde->hpp.cmp = __sort__hde_cmp; 3056 hde->hpp.collapse = __sort__hde_cmp; 3057 hde->hpp.sort = __sort__hde_cmp; 3058 hde->hpp.equal = __sort__hde_equal; 3059 hde->hpp.free = hde_free; 3060 3061 INIT_LIST_HEAD(&hde->hpp.list); 3062 INIT_LIST_HEAD(&hde->hpp.sort_list); 3063 hde->hpp.elide = false; 3064 hde->hpp.len = 0; 3065 hde->hpp.user_len = 0; 3066 hde->hpp.level = level; 3067 3068 return hde; 3069 } 3070 #endif /* HAVE_LIBTRACEEVENT */ 3071 3072 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 3073 { 3074 struct perf_hpp_fmt *new_fmt = NULL; 3075 3076 if (perf_hpp__is_sort_entry(fmt)) { 3077 struct hpp_sort_entry *hse, *new_hse; 3078 3079 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3080 new_hse = memdup(hse, sizeof(*hse)); 3081 if (new_hse) 3082 new_fmt = &new_hse->hpp; 3083 #ifdef HAVE_LIBTRACEEVENT 3084 } else if (perf_hpp__is_dynamic_entry(fmt)) { 3085 struct hpp_dynamic_entry *hde, *new_hde; 3086 3087 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 3088 new_hde = memdup(hde, sizeof(*hde)); 3089 if (new_hde) 3090 new_fmt = &new_hde->hpp; 3091 #endif 3092 } else { 3093 new_fmt = memdup(fmt, sizeof(*fmt)); 3094 } 3095 3096 INIT_LIST_HEAD(&new_fmt->list); 3097 INIT_LIST_HEAD(&new_fmt->sort_list); 3098 3099 return new_fmt; 3100 } 3101 3102 static int parse_field_name(char *str, char **event, char **field, char **opt) 3103 { 3104 char *event_name, *field_name, *opt_name; 3105 3106 event_name = str; 3107 field_name = strchr(str, '.'); 3108 3109 if (field_name) { 3110 *field_name++ = '\0'; 3111 } else { 3112 event_name = NULL; 3113 field_name = str; 3114 } 3115 3116 opt_name = strchr(field_name, '/'); 3117 if (opt_name) 3118 *opt_name++ = '\0'; 3119 3120 *event = event_name; 3121 *field = field_name; 3122 *opt = opt_name; 3123 3124 return 0; 3125 } 3126 3127 /* find match evsel using a given event name. The event name can be: 3128 * 1. '%' + event index (e.g. '%1' for first event) 3129 * 2. full event name (e.g. sched:sched_switch) 3130 * 3. 
partial event name (should not contain ':') 3131 */ 3132 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 3133 { 3134 struct evsel *evsel = NULL; 3135 struct evsel *pos; 3136 bool full_name; 3137 3138 /* case 1 */ 3139 if (event_name[0] == '%') { 3140 int nr = strtol(event_name+1, NULL, 0); 3141 3142 if (nr > evlist->core.nr_entries) 3143 return NULL; 3144 3145 evsel = evlist__first(evlist); 3146 while (--nr > 0) 3147 evsel = evsel__next(evsel); 3148 3149 return evsel; 3150 } 3151 3152 full_name = !!strchr(event_name, ':'); 3153 evlist__for_each_entry(evlist, pos) { 3154 /* case 2 */ 3155 if (full_name && evsel__name_is(pos, event_name)) 3156 return pos; 3157 /* case 3 */ 3158 if (!full_name && strstr(pos->name, event_name)) { 3159 if (evsel) { 3160 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 3161 event_name, evsel->name, pos->name); 3162 return NULL; 3163 } 3164 evsel = pos; 3165 } 3166 } 3167 3168 return evsel; 3169 } 3170 3171 #ifdef HAVE_LIBTRACEEVENT 3172 static int __dynamic_dimension__add(struct evsel *evsel, 3173 struct tep_format_field *field, 3174 bool raw_trace, int level) 3175 { 3176 struct hpp_dynamic_entry *hde; 3177 3178 hde = __alloc_dynamic_entry(evsel, field, level); 3179 if (hde == NULL) 3180 return -ENOMEM; 3181 3182 hde->raw_trace = raw_trace; 3183 3184 perf_hpp__register_sort_field(&hde->hpp); 3185 return 0; 3186 } 3187 3188 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 3189 { 3190 int ret; 3191 struct tep_format_field *field; 3192 3193 field = evsel->tp_format->format.fields; 3194 while (field) { 3195 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3196 if (ret < 0) 3197 return ret; 3198 3199 field = field->next; 3200 } 3201 return 0; 3202 } 3203 3204 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 3205 int level) 3206 { 3207 int ret; 3208 struct evsel *evsel; 3209 3210 evlist__for_each_entry(evlist, evsel) { 3211 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3212 continue; 3213 3214 ret = add_evsel_fields(evsel, raw_trace, level); 3215 if (ret < 0) 3216 return ret; 3217 } 3218 return 0; 3219 } 3220 3221 static int add_all_matching_fields(struct evlist *evlist, 3222 char *field_name, bool raw_trace, int level) 3223 { 3224 int ret = -ESRCH; 3225 struct evsel *evsel; 3226 struct tep_format_field *field; 3227 3228 evlist__for_each_entry(evlist, evsel) { 3229 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 3230 continue; 3231 3232 field = tep_find_any_field(evsel->tp_format, field_name); 3233 if (field == NULL) 3234 continue; 3235 3236 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3237 if (ret < 0) 3238 break; 3239 } 3240 return ret; 3241 } 3242 #endif /* HAVE_LIBTRACEEVENT */ 3243 3244 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 3245 int level) 3246 { 3247 char *str, *event_name, *field_name, *opt_name; 3248 struct evsel *evsel; 3249 bool raw_trace = symbol_conf.raw_trace; 3250 int ret = 0; 3251 3252 if (evlist == NULL) 3253 return -ENOENT; 3254 3255 str = strdup(tok); 3256 if (str == NULL) 3257 return -ENOMEM; 3258 3259 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 3260 ret = -EINVAL; 3261 goto out; 3262 } 3263 3264 if (opt_name) { 3265 if (strcmp(opt_name, "raw")) { 3266 pr_debug("unsupported field option %s\n", opt_name); 3267 ret = -EINVAL; 3268 goto out; 3269 } 3270 raw_trace = true; 3271 } 3272 3273 #ifdef HAVE_LIBTRACEEVENT 3274 if (!strcmp(field_name, "trace_fields")) { 3275 ret = 
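		/*
		 * "trace_fields" is a catch-all token: add one dynamic column
		 * for every field of every tracepoint event in the evlist.
		 */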
add_all_dynamic_fields(evlist, raw_trace, level); 3276 goto out; 3277 } 3278 3279 if (event_name == NULL) { 3280 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 3281 goto out; 3282 } 3283 #else 3284 evlist__for_each_entry(evlist, evsel) { 3285 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 3286 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel)); 3287 ret = -ENOTSUP; 3288 } 3289 } 3290 3291 if (ret) { 3292 pr_err("\n"); 3293 goto out; 3294 } 3295 #endif 3296 3297 evsel = find_evsel(evlist, event_name); 3298 if (evsel == NULL) { 3299 pr_debug("Cannot find event: %s\n", event_name); 3300 ret = -ENOENT; 3301 goto out; 3302 } 3303 3304 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3305 pr_debug("%s is not a tracepoint event\n", event_name); 3306 ret = -EINVAL; 3307 goto out; 3308 } 3309 3310 #ifdef HAVE_LIBTRACEEVENT 3311 if (!strcmp(field_name, "*")) { 3312 ret = add_evsel_fields(evsel, raw_trace, level); 3313 } else { 3314 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name); 3315 3316 if (field == NULL) { 3317 pr_debug("Cannot find event field for %s.%s\n", 3318 event_name, field_name); 3319 return -ENOENT; 3320 } 3321 3322 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3323 } 3324 #else 3325 (void)level; 3326 (void)raw_trace; 3327 #endif /* HAVE_LIBTRACEEVENT */ 3328 3329 out: 3330 free(str); 3331 return ret; 3332 } 3333 3334 static int __sort_dimension__add(struct sort_dimension *sd, 3335 struct perf_hpp_list *list, 3336 int level) 3337 { 3338 if (sd->taken) 3339 return 0; 3340 3341 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 3342 return -1; 3343 3344 if (sd->entry->se_collapse) 3345 list->need_collapse = 1; 3346 3347 sd->taken = 1; 3348 3349 return 0; 3350 } 3351 3352 static int __hpp_dimension__add(struct hpp_dimension *hd, 3353 struct perf_hpp_list *list, 3354 int level) 3355 { 3356 struct perf_hpp_fmt *fmt; 3357 3358 if (hd->taken) 3359 return 0; 3360 3361 fmt = __hpp_dimension__alloc_hpp(hd, level); 3362 if (!fmt) 3363 return -1; 3364 3365 hd->taken = 1; 3366 perf_hpp_list__register_sort_field(list, fmt); 3367 return 0; 3368 } 3369 3370 static int __sort_dimension__add_output(struct perf_hpp_list *list, 3371 struct sort_dimension *sd) 3372 { 3373 if (sd->taken) 3374 return 0; 3375 3376 if (__sort_dimension__add_hpp_output(sd, list) < 0) 3377 return -1; 3378 3379 sd->taken = 1; 3380 return 0; 3381 } 3382 3383 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 3384 struct hpp_dimension *hd) 3385 { 3386 struct perf_hpp_fmt *fmt; 3387 3388 if (hd->taken) 3389 return 0; 3390 3391 fmt = __hpp_dimension__alloc_hpp(hd, 0); 3392 if (!fmt) 3393 return -1; 3394 3395 hd->taken = 1; 3396 perf_hpp_list__column_register(list, fmt); 3397 return 0; 3398 } 3399 3400 int hpp_dimension__add_output(unsigned col) 3401 { 3402 BUG_ON(col >= PERF_HPP__MAX_INDEX); 3403 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 3404 } 3405 3406 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 3407 struct evlist *evlist, 3408 int level) 3409 { 3410 unsigned int i, j; 3411 3412 /* 3413 * Check to see if there are any arch specific 3414 * sort dimensions not applicable for the current 3415 * architecture. If so, Skip that sort key since 3416 * we don't want to display it in the output fields. 
3417 */ 3418 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) { 3419 if (!strcmp(arch_specific_sort_keys[j], tok) && 3420 !arch_support_sort_key(tok)) { 3421 return 0; 3422 } 3423 } 3424 3425 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3426 struct sort_dimension *sd = &common_sort_dimensions[i]; 3427 3428 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3429 continue; 3430 3431 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) { 3432 if (sd->name && !strcmp(dynamic_headers[j], sd->name)) 3433 sort_dimension_add_dynamic_header(sd); 3434 } 3435 3436 if (sd->entry == &sort_parent && parent_pattern) { 3437 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 3438 if (ret) { 3439 char err[BUFSIZ]; 3440 3441 regerror(ret, &parent_regex, err, sizeof(err)); 3442 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 3443 return -EINVAL; 3444 } 3445 list->parent = 1; 3446 } else if (sd->entry == &sort_sym) { 3447 list->sym = 1; 3448 /* 3449 * perf diff displays the performance difference amongst 3450 * two or more perf.data files. Those files could come 3451 * from different binaries. So we should not compare 3452 * their ips, but the name of symbol. 3453 */ 3454 if (sort__mode == SORT_MODE__DIFF) 3455 sd->entry->se_collapse = sort__sym_sort; 3456 3457 } else if (sd->entry == &sort_dso) { 3458 list->dso = 1; 3459 } else if (sd->entry == &sort_socket) { 3460 list->socket = 1; 3461 } else if (sd->entry == &sort_thread) { 3462 list->thread = 1; 3463 } else if (sd->entry == &sort_comm) { 3464 list->comm = 1; 3465 } else if (sd->entry == &sort_type_offset) { 3466 symbol_conf.annotate_data_member = true; 3467 } 3468 3469 return __sort_dimension__add(sd, list, level); 3470 } 3471 3472 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3473 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3474 3475 if (strncasecmp(tok, hd->name, strlen(tok))) 3476 continue; 3477 3478 return __hpp_dimension__add(hd, list, level); 3479 } 3480 3481 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3482 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3483 3484 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3485 continue; 3486 3487 if (sort__mode != SORT_MODE__BRANCH) 3488 return -EINVAL; 3489 3490 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 3491 list->sym = 1; 3492 3493 __sort_dimension__add(sd, list, level); 3494 return 0; 3495 } 3496 3497 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3498 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3499 3500 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3501 continue; 3502 3503 if (sort__mode != SORT_MODE__MEMORY) 3504 return -EINVAL; 3505 3506 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 3507 return -EINVAL; 3508 3509 if (sd->entry == &sort_mem_daddr_sym) 3510 list->sym = 1; 3511 3512 __sort_dimension__add(sd, list, level); 3513 return 0; 3514 } 3515 3516 if (!add_dynamic_entry(evlist, tok, level)) 3517 return 0; 3518 3519 return -ESRCH; 3520 } 3521 3522 static int setup_sort_list(struct perf_hpp_list *list, char *str, 3523 struct evlist *evlist) 3524 { 3525 char *tmp, *tok; 3526 int ret = 0; 3527 int level = 0; 3528 int next_level = 1; 3529 bool in_group = false; 3530 3531 do { 3532 tok = str; 3533 tmp = strpbrk(str, "{}, "); 3534 if (tmp) { 3535 if (in_group) 3536 next_level = level; 3537 else 3538 next_level = level + 1; 3539 3540 if (*tmp == '{') 3541 in_group = true; 3542 else if (*tmp == '}') 3543 in_group = false; 3544 3545 *tmp = 
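		/*
		 * Terminate the current token in place.  The separator
		 * handling above also implements key grouping: keys inside
		 * '{}' stay on the same level while every other key gets a
		 * new one, so e.g. "-s '{comm,dso},sym'" puts comm and dso
		 * together on one hierarchy level and sym on the next (used
		 * by the hierarchy view).
		 */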
'\0'; 3546 str = tmp + 1; 3547 } 3548 3549 if (*tok) { 3550 ret = sort_dimension__add(list, tok, evlist, level); 3551 if (ret == -EINVAL) { 3552 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 3553 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 3554 else 3555 ui__error("Invalid --sort key: `%s'", tok); 3556 break; 3557 } else if (ret == -ESRCH) { 3558 ui__error("Unknown --sort key: `%s'", tok); 3559 break; 3560 } 3561 } 3562 3563 level = next_level; 3564 } while (tmp); 3565 3566 return ret; 3567 } 3568 3569 static const char *get_default_sort_order(struct evlist *evlist) 3570 { 3571 const char *default_sort_orders[] = { 3572 default_sort_order, 3573 default_branch_sort_order, 3574 default_mem_sort_order, 3575 default_top_sort_order, 3576 default_diff_sort_order, 3577 default_tracepoint_sort_order, 3578 }; 3579 bool use_trace = true; 3580 struct evsel *evsel; 3581 3582 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 3583 3584 if (evlist == NULL || evlist__empty(evlist)) 3585 goto out_no_evlist; 3586 3587 evlist__for_each_entry(evlist, evsel) { 3588 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3589 use_trace = false; 3590 break; 3591 } 3592 } 3593 3594 if (use_trace) { 3595 sort__mode = SORT_MODE__TRACEPOINT; 3596 if (symbol_conf.raw_trace) 3597 return "trace_fields"; 3598 } 3599 out_no_evlist: 3600 return default_sort_orders[sort__mode]; 3601 } 3602 3603 static int setup_sort_order(struct evlist *evlist) 3604 { 3605 char *new_sort_order; 3606 3607 /* 3608 * Append '+'-prefixed sort order to the default sort 3609 * order string. 3610 */ 3611 if (!sort_order || is_strict_order(sort_order)) 3612 return 0; 3613 3614 if (sort_order[1] == '\0') { 3615 ui__error("Invalid --sort key: `+'"); 3616 return -EINVAL; 3617 } 3618 3619 /* 3620 * We allocate new sort_order string, but we never free it, 3621 * because it's checked over the rest of the code. 3622 */ 3623 if (asprintf(&new_sort_order, "%s,%s", 3624 get_default_sort_order(evlist), sort_order + 1) < 0) { 3625 pr_err("Not enough memory to set up --sort"); 3626 return -ENOMEM; 3627 } 3628 3629 sort_order = new_sort_order; 3630 return 0; 3631 } 3632 3633 /* 3634 * Adds 'pre,' prefix into 'str' is 'pre' is 3635 * not already part of 'str'. 3636 */ 3637 static char *prefix_if_not_in(const char *pre, char *str) 3638 { 3639 char *n; 3640 3641 if (!str || strstr(str, pre)) 3642 return str; 3643 3644 if (asprintf(&n, "%s,%s", pre, str) < 0) 3645 n = NULL; 3646 3647 free(str); 3648 return n; 3649 } 3650 3651 static char *setup_overhead(char *keys) 3652 { 3653 if (sort__mode == SORT_MODE__DIFF) 3654 return keys; 3655 3656 keys = prefix_if_not_in("overhead", keys); 3657 3658 if (symbol_conf.cumulate_callchain) 3659 keys = prefix_if_not_in("overhead_children", keys); 3660 3661 return keys; 3662 } 3663 3664 static int __setup_sorting(struct evlist *evlist) 3665 { 3666 char *str; 3667 const char *sort_keys; 3668 int ret = 0; 3669 3670 ret = setup_sort_order(evlist); 3671 if (ret) 3672 return ret; 3673 3674 sort_keys = sort_order; 3675 if (sort_keys == NULL) { 3676 if (is_strict_order(field_order)) { 3677 /* 3678 * If user specified field order but no sort order, 3679 * we'll honor it and not add default sort orders. 
3680 */ 3681 return 0; 3682 } 3683 3684 sort_keys = get_default_sort_order(evlist); 3685 } 3686 3687 str = strdup(sort_keys); 3688 if (str == NULL) { 3689 pr_err("Not enough memory to setup sort keys"); 3690 return -ENOMEM; 3691 } 3692 3693 /* 3694 * Prepend overhead fields for backward compatibility. 3695 */ 3696 if (!is_strict_order(field_order)) { 3697 str = setup_overhead(str); 3698 if (str == NULL) { 3699 pr_err("Not enough memory to setup overhead keys"); 3700 return -ENOMEM; 3701 } 3702 } 3703 3704 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3705 3706 free(str); 3707 return ret; 3708 } 3709 3710 void perf_hpp__set_elide(int idx, bool elide) 3711 { 3712 struct perf_hpp_fmt *fmt; 3713 struct hpp_sort_entry *hse; 3714 3715 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3716 if (!perf_hpp__is_sort_entry(fmt)) 3717 continue; 3718 3719 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3720 if (hse->se->se_width_idx == idx) { 3721 fmt->elide = elide; 3722 break; 3723 } 3724 } 3725 } 3726 3727 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3728 { 3729 if (list && strlist__nr_entries(list) == 1) { 3730 if (fp != NULL) 3731 fprintf(fp, "# %s: %s\n", list_name, 3732 strlist__entry(list, 0)->s); 3733 return true; 3734 } 3735 return false; 3736 } 3737 3738 static bool get_elide(int idx, FILE *output) 3739 { 3740 switch (idx) { 3741 case HISTC_SYMBOL: 3742 return __get_elide(symbol_conf.sym_list, "symbol", output); 3743 case HISTC_DSO: 3744 return __get_elide(symbol_conf.dso_list, "dso", output); 3745 case HISTC_COMM: 3746 return __get_elide(symbol_conf.comm_list, "comm", output); 3747 default: 3748 break; 3749 } 3750 3751 if (sort__mode != SORT_MODE__BRANCH) 3752 return false; 3753 3754 switch (idx) { 3755 case HISTC_SYMBOL_FROM: 3756 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3757 case HISTC_SYMBOL_TO: 3758 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3759 case HISTC_DSO_FROM: 3760 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3761 case HISTC_DSO_TO: 3762 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3763 case HISTC_ADDR_FROM: 3764 return __get_elide(symbol_conf.sym_from_list, "addr_from", output); 3765 case HISTC_ADDR_TO: 3766 return __get_elide(symbol_conf.sym_to_list, "addr_to", output); 3767 default: 3768 break; 3769 } 3770 3771 return false; 3772 } 3773 3774 void sort__setup_elide(FILE *output) 3775 { 3776 struct perf_hpp_fmt *fmt; 3777 struct hpp_sort_entry *hse; 3778 3779 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3780 if (!perf_hpp__is_sort_entry(fmt)) 3781 continue; 3782 3783 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3784 fmt->elide = get_elide(hse->se->se_width_idx, output); 3785 } 3786 3787 /* 3788 * It makes no sense to elide all of sort entries. 3789 * Just revert them to show up again. 
3790 */ 3791 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3792 if (!perf_hpp__is_sort_entry(fmt)) 3793 continue; 3794 3795 if (!fmt->elide) 3796 return; 3797 } 3798 3799 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3800 if (!perf_hpp__is_sort_entry(fmt)) 3801 continue; 3802 3803 fmt->elide = false; 3804 } 3805 } 3806 3807 int output_field_add(struct perf_hpp_list *list, const char *tok) 3808 { 3809 unsigned int i; 3810 3811 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3812 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3813 3814 if (strncasecmp(tok, hd->name, strlen(tok))) 3815 continue; 3816 3817 if (!strcasecmp(tok, "weight")) 3818 ui__warning("--fields weight shows the average value unlike in the --sort key.\n"); 3819 3820 return __hpp_dimension__add_output(list, hd); 3821 } 3822 3823 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3824 struct sort_dimension *sd = &common_sort_dimensions[i]; 3825 3826 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3827 continue; 3828 3829 return __sort_dimension__add_output(list, sd); 3830 } 3831 3832 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3833 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3834 3835 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3836 continue; 3837 3838 if (sort__mode != SORT_MODE__BRANCH) 3839 return -EINVAL; 3840 3841 return __sort_dimension__add_output(list, sd); 3842 } 3843 3844 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3845 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3846 3847 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3848 continue; 3849 3850 if (sort__mode != SORT_MODE__MEMORY) 3851 return -EINVAL; 3852 3853 return __sort_dimension__add_output(list, sd); 3854 } 3855 3856 return -ESRCH; 3857 } 3858 3859 static int setup_output_list(struct perf_hpp_list *list, char *str) 3860 { 3861 char *tmp, *tok; 3862 int ret = 0; 3863 3864 for (tok = strtok_r(str, ", ", &tmp); 3865 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3866 ret = output_field_add(list, tok); 3867 if (ret == -EINVAL) { 3868 ui__error("Invalid --fields key: `%s'", tok); 3869 break; 3870 } else if (ret == -ESRCH) { 3871 ui__error("Unknown --fields key: `%s'", tok); 3872 break; 3873 } 3874 } 3875 3876 return ret; 3877 } 3878 3879 void reset_dimensions(void) 3880 { 3881 unsigned int i; 3882 3883 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3884 common_sort_dimensions[i].taken = 0; 3885 3886 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3887 hpp_sort_dimensions[i].taken = 0; 3888 3889 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3890 bstack_sort_dimensions[i].taken = 0; 3891 3892 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3893 memory_sort_dimensions[i].taken = 0; 3894 } 3895 3896 bool is_strict_order(const char *order) 3897 { 3898 return order && (*order != '+'); 3899 } 3900 3901 static int __setup_output_field(void) 3902 { 3903 char *str, *strp; 3904 int ret = -EINVAL; 3905 3906 if (field_order == NULL) 3907 return 0; 3908 3909 strp = str = strdup(field_order); 3910 if (str == NULL) { 3911 pr_err("Not enough memory to setup output fields"); 3912 return -ENOMEM; 3913 } 3914 3915 if (!is_strict_order(field_order)) 3916 strp++; 3917 3918 if (!strlen(strp)) { 3919 ui__error("Invalid --fields key: `+'"); 3920 goto out; 3921 } 3922 3923 ret = setup_output_list(&perf_hpp_list, strp); 3924 3925 out: 3926 free(str); 3927 return ret; 3928 } 3929 3930 int setup_sorting(struct evlist *evlist) 3931 { 3932 int 
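	/*
	 * Overall setup sequence: parse the sort keys, optionally add the
	 * "parent" key, re-arm the dimension tables, install the default
	 * output columns (except for perf diff), parse the --fields list,
	 * and finally copy sort keys and output fields into each other so
	 * both lists are complete.
	 */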
err; 3933 3934 err = __setup_sorting(evlist); 3935 if (err < 0) 3936 return err; 3937 3938 if (parent_pattern != default_parent_pattern) { 3939 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3940 if (err < 0) 3941 return err; 3942 } 3943 3944 reset_dimensions(); 3945 3946 /* 3947 * perf diff doesn't use default hpp output fields. 3948 */ 3949 if (sort__mode != SORT_MODE__DIFF) 3950 perf_hpp__init(); 3951 3952 err = __setup_output_field(); 3953 if (err < 0) 3954 return err; 3955 3956 /* copy sort keys to output fields */ 3957 perf_hpp__setup_output_field(&perf_hpp_list); 3958 /* and then copy output fields to sort keys */ 3959 perf_hpp__append_sort_keys(&perf_hpp_list); 3960 3961 /* setup hists-specific output fields */ 3962 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3963 return -1; 3964 3965 return 0; 3966 } 3967 3968 void reset_output_field(void) 3969 { 3970 perf_hpp_list.need_collapse = 0; 3971 perf_hpp_list.parent = 0; 3972 perf_hpp_list.sym = 0; 3973 perf_hpp_list.dso = 0; 3974 3975 field_order = NULL; 3976 sort_order = NULL; 3977 3978 reset_dimensions(); 3979 perf_hpp__reset_output_field(&perf_hpp_list); 3980 } 3981 3982 #define INDENT (3*8 + 1) 3983 3984 static void add_key(struct strbuf *sb, const char *str, int *llen) 3985 { 3986 if (!str) 3987 return; 3988 3989 if (*llen >= 75) { 3990 strbuf_addstr(sb, "\n\t\t\t "); 3991 *llen = INDENT; 3992 } 3993 strbuf_addf(sb, " %s", str); 3994 *llen += strlen(str) + 1; 3995 } 3996 3997 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3998 int *llen) 3999 { 4000 int i; 4001 4002 for (i = 0; i < n; i++) 4003 add_key(sb, s[i].name, llen); 4004 } 4005 4006 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 4007 int *llen) 4008 { 4009 int i; 4010 4011 for (i = 0; i < n; i++) 4012 add_key(sb, s[i].name, llen); 4013 } 4014 4015 char *sort_help(const char *prefix, enum sort_mode mode) 4016 { 4017 struct strbuf sb; 4018 char *s; 4019 int len = strlen(prefix) + INDENT; 4020 4021 strbuf_init(&sb, 300); 4022 strbuf_addstr(&sb, prefix); 4023 add_hpp_sort_string(&sb, hpp_sort_dimensions, 4024 ARRAY_SIZE(hpp_sort_dimensions), &len); 4025 add_sort_string(&sb, common_sort_dimensions, 4026 ARRAY_SIZE(common_sort_dimensions), &len); 4027 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH) 4028 add_sort_string(&sb, bstack_sort_dimensions, 4029 ARRAY_SIZE(bstack_sort_dimensions), &len); 4030 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY) 4031 add_sort_string(&sb, memory_sort_dimensions, 4032 ARRAY_SIZE(memory_sort_dimensions), &len); 4033 s = strbuf_detach(&sb, NULL); 4034 strbuf_release(&sb); 4035 return s; 4036 } 4037
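/*
 * Usage sketch (illustrative only, not part of the perf code): a typical
 * caller sets the global sort_order/field_order from its command line
 * options and then runs the setup, e.g.:
 *
 *	sort_order  = "comm,dso,symbol";   // NULL would pick the default
 *	field_order = NULL;
 *	if (setup_sorting(evlist) < 0)
 *		return -1;                 // unknown/invalid --sort or --fields key
 *	...
 *	reset_output_field();              // drop the setup when it is no longer needed
 *
 * A "+key" sort string appends to the default order and "{a,b}" groups keys
 * onto a single hierarchy level (see setup_sort_list()).
 */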