1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <stdlib.h> 6 #include <linux/mman.h> 7 #include <linux/time64.h> 8 #include "debug.h" 9 #include "dso.h" 10 #include "sort.h" 11 #include "hist.h" 12 #include "cacheline.h" 13 #include "comm.h" 14 #include "map.h" 15 #include "maps.h" 16 #include "symbol.h" 17 #include "map_symbol.h" 18 #include "branch.h" 19 #include "thread.h" 20 #include "evsel.h" 21 #include "evlist.h" 22 #include "srcline.h" 23 #include "strlist.h" 24 #include "strbuf.h" 25 #include "mem-events.h" 26 #include "annotate.h" 27 #include "event.h" 28 #include "time-utils.h" 29 #include "cgroup.h" 30 #include "machine.h" 31 #include "trace-event.h" 32 #include <linux/kernel.h> 33 #include <linux/string.h> 34 35 #ifdef HAVE_LIBTRACEEVENT 36 #include <traceevent/event-parse.h> 37 #endif 38 39 regex_t parent_regex; 40 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 41 const char *parent_pattern = default_parent_pattern; 42 const char *default_sort_order = "comm,dso,symbol"; 43 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 44 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc"; 45 const char default_top_sort_order[] = "dso,symbol"; 46 const char default_diff_sort_order[] = "dso,symbol"; 47 const char default_tracepoint_sort_order[] = "trace"; 48 const char *sort_order; 49 const char *field_order; 50 regex_t ignore_callees_regex; 51 int have_ignore_callees = 0; 52 enum sort_mode sort__mode = SORT_MODE__NORMAL; 53 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"}; 54 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"}; 55 56 /* 57 * Some architectures have Adjacent Cacheline Prefetch feature, which 58 * behaves like the cacheline size is doubled. Enable this flag to 59 * check things in double cacheline granularity. 60 */ 61 bool chk_double_cl; 62 63 /* 64 * Replaces all occurrences of a char used with the: 65 * 66 * -t, --field-separator 67 * 68 * option, that uses a special separator character and don't pad with spaces, 69 * replacing all occurrences of this separator in symbol names (and other 70 * output) with a '.' character, that thus it's the only non valid separator. 71 */ 72 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 73 { 74 int n; 75 va_list ap; 76 77 va_start(ap, fmt); 78 n = vsnprintf(bf, size, fmt, ap); 79 if (symbol_conf.field_sep && n > 0) { 80 char *sep = bf; 81 82 while (1) { 83 sep = strchr(sep, *symbol_conf.field_sep); 84 if (sep == NULL) 85 break; 86 *sep = '.'; 87 } 88 } 89 va_end(ap); 90 91 if (n >= (int)size) 92 return size - 1; 93 return n; 94 } 95 96 static int64_t cmp_null(const void *l, const void *r) 97 { 98 if (!l && !r) 99 return 0; 100 else if (!l) 101 return -1; 102 else 103 return 1; 104 } 105 106 /* --sort pid */ 107 108 static int64_t 109 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 110 { 111 return right->thread->tid - left->thread->tid; 112 } 113 114 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 115 size_t size, unsigned int width) 116 { 117 const char *comm = thread__comm_str(he->thread); 118 119 width = max(7U, width) - 8; 120 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 121 width, width, comm ?: ""); 122 } 123 124 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 125 { 126 const struct thread *th = arg; 127 128 if (type != HIST_FILTER__THREAD) 129 return -1; 130 131 return th && he->thread != th; 132 } 133 134 struct sort_entry sort_thread = { 135 .se_header = " Pid:Command", 136 .se_cmp = sort__thread_cmp, 137 .se_snprintf = hist_entry__thread_snprintf, 138 .se_filter = hist_entry__thread_filter, 139 .se_width_idx = HISTC_THREAD, 140 }; 141 142 /* --sort simd */ 143 144 static int64_t 145 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right) 146 { 147 if (left->simd_flags.arch != right->simd_flags.arch) 148 return (int64_t) left->simd_flags.arch - right->simd_flags.arch; 149 150 return (int64_t) left->simd_flags.pred - right->simd_flags.pred; 151 } 152 153 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags) 154 { 155 u64 arch = simd_flags->arch; 156 157 if (arch & SIMD_OP_FLAGS_ARCH_SVE) 158 return "SVE"; 159 else 160 return "n/a"; 161 } 162 163 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf, 164 size_t size, unsigned int width __maybe_unused) 165 { 166 const char *name; 167 168 if (!he->simd_flags.arch) 169 return repsep_snprintf(bf, size, ""); 170 171 name = hist_entry__get_simd_name(&he->simd_flags); 172 173 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY) 174 return repsep_snprintf(bf, size, "[e] %s", name); 175 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL) 176 return repsep_snprintf(bf, size, "[p] %s", name); 177 178 return repsep_snprintf(bf, size, "[.] %s", name); 179 } 180 181 struct sort_entry sort_simd = { 182 .se_header = "Simd ", 183 .se_cmp = sort__simd_cmp, 184 .se_snprintf = hist_entry__simd_snprintf, 185 .se_width_idx = HISTC_SIMD, 186 }; 187 188 /* --sort comm */ 189 190 /* 191 * We can't use pointer comparison in functions below, 192 * because it gives different results based on pointer 193 * values, which could break some sorting assumptions. 194 */ 195 static int64_t 196 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 197 { 198 return strcmp(comm__str(right->comm), comm__str(left->comm)); 199 } 200 201 static int64_t 202 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 203 { 204 return strcmp(comm__str(right->comm), comm__str(left->comm)); 205 } 206 207 static int64_t 208 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 209 { 210 return strcmp(comm__str(right->comm), comm__str(left->comm)); 211 } 212 213 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 214 size_t size, unsigned int width) 215 { 216 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 217 } 218 219 struct sort_entry sort_comm = { 220 .se_header = "Command", 221 .se_cmp = sort__comm_cmp, 222 .se_collapse = sort__comm_collapse, 223 .se_sort = sort__comm_sort, 224 .se_snprintf = hist_entry__comm_snprintf, 225 .se_filter = hist_entry__thread_filter, 226 .se_width_idx = HISTC_COMM, 227 }; 228 229 /* --sort dso */ 230 231 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 232 { 233 struct dso *dso_l = map_l ? map__dso(map_l) : NULL; 234 struct dso *dso_r = map_r ? map__dso(map_r) : NULL; 235 const char *dso_name_l, *dso_name_r; 236 237 if (!dso_l || !dso_r) 238 return cmp_null(dso_r, dso_l); 239 240 if (verbose > 0) { 241 dso_name_l = dso_l->long_name; 242 dso_name_r = dso_r->long_name; 243 } else { 244 dso_name_l = dso_l->short_name; 245 dso_name_r = dso_r->short_name; 246 } 247 248 return strcmp(dso_name_l, dso_name_r); 249 } 250 251 static int64_t 252 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 253 { 254 return _sort__dso_cmp(right->ms.map, left->ms.map); 255 } 256 257 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 258 size_t size, unsigned int width) 259 { 260 const struct dso *dso = map ? map__dso(map) : NULL; 261 const char *dso_name = "[unknown]"; 262 263 if (dso) 264 dso_name = verbose > 0 ? dso->long_name : dso->short_name; 265 266 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 267 } 268 269 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 270 size_t size, unsigned int width) 271 { 272 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 273 } 274 275 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 276 { 277 const struct dso *dso = arg; 278 279 if (type != HIST_FILTER__DSO) 280 return -1; 281 282 return dso && (!he->ms.map || map__dso(he->ms.map) != dso); 283 } 284 285 struct sort_entry sort_dso = { 286 .se_header = "Shared Object", 287 .se_cmp = sort__dso_cmp, 288 .se_snprintf = hist_entry__dso_snprintf, 289 .se_filter = hist_entry__dso_filter, 290 .se_width_idx = HISTC_DSO, 291 }; 292 293 /* --sort symbol */ 294 295 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 296 { 297 return (int64_t)(right_ip - left_ip); 298 } 299 300 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 301 { 302 if (!sym_l || !sym_r) 303 return cmp_null(sym_l, sym_r); 304 305 if (sym_l == sym_r) 306 return 0; 307 308 if (sym_l->inlined || sym_r->inlined) { 309 int ret = strcmp(sym_l->name, sym_r->name); 310 311 if (ret) 312 return ret; 313 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) 314 return 0; 315 } 316 317 if (sym_l->start != sym_r->start) 318 return (int64_t)(sym_r->start - sym_l->start); 319 320 return (int64_t)(sym_r->end - sym_l->end); 321 } 322 323 static int64_t 324 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 325 { 326 int64_t ret; 327 328 if (!left->ms.sym && !right->ms.sym) 329 return _sort__addr_cmp(left->ip, right->ip); 330 331 /* 332 * comparing symbol address alone is not enough since it's a 333 * relative address within a dso. 334 */ 335 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 336 ret = sort__dso_cmp(left, right); 337 if (ret != 0) 338 return ret; 339 } 340 341 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 342 } 343 344 static int64_t 345 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 346 { 347 if (!left->ms.sym || !right->ms.sym) 348 return cmp_null(left->ms.sym, right->ms.sym); 349 350 return strcmp(right->ms.sym->name, left->ms.sym->name); 351 } 352 353 static int _hist_entry__sym_snprintf(struct map_symbol *ms, 354 u64 ip, char level, char *bf, size_t size, 355 unsigned int width) 356 { 357 struct symbol *sym = ms->sym; 358 struct map *map = ms->map; 359 size_t ret = 0; 360 361 if (verbose > 0) { 362 struct dso *dso = map ? map__dso(map) : NULL; 363 char o = dso ? dso__symtab_origin(dso) : '!'; 364 u64 rip = ip; 365 366 if (dso && dso->kernel && dso->adjust_symbols) 367 rip = map->unmap_ip(map, ip); 368 369 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 370 BITS_PER_LONG / 4 + 2, rip, o); 371 } 372 373 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 374 if (sym && map) { 375 if (sym->type == STT_OBJECT) { 376 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 377 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 378 ip - map->unmap_ip(map, sym->start)); 379 } else { 380 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 381 width - ret, 382 sym->name); 383 if (sym->inlined) 384 ret += repsep_snprintf(bf + ret, size - ret, 385 " (inlined)"); 386 } 387 } else { 388 size_t len = BITS_PER_LONG / 4; 389 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 390 len, ip); 391 } 392 393 return ret; 394 } 395 396 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) 397 { 398 return _hist_entry__sym_snprintf(&he->ms, he->ip, 399 he->level, bf, size, width); 400 } 401 402 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 403 { 404 const char *sym = arg; 405 406 if (type != HIST_FILTER__SYMBOL) 407 return -1; 408 409 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 410 } 411 412 struct sort_entry sort_sym = { 413 .se_header = "Symbol", 414 .se_cmp = sort__sym_cmp, 415 .se_sort = sort__sym_sort, 416 .se_snprintf = hist_entry__sym_snprintf, 417 .se_filter = hist_entry__sym_filter, 418 .se_width_idx = HISTC_SYMBOL, 419 }; 420 421 /* --sort srcline */ 422 423 char *hist_entry__srcline(struct hist_entry *he) 424 { 425 return map__srcline(he->ms.map, he->ip, he->ms.sym); 426 } 427 428 static int64_t 429 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 430 { 431 int64_t ret; 432 433 ret = _sort__addr_cmp(left->ip, right->ip); 434 if (ret) 435 return ret; 436 437 return sort__dso_cmp(left, right); 438 } 439 440 static int64_t 441 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right) 442 { 443 if (!left->srcline) 444 left->srcline = hist_entry__srcline(left); 445 if (!right->srcline) 446 right->srcline = hist_entry__srcline(right); 447 448 return strcmp(right->srcline, left->srcline); 449 } 450 451 static int64_t 452 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right) 453 { 454 return sort__srcline_collapse(left, right); 455 } 456 457 static void 458 sort__srcline_init(struct hist_entry *he) 459 { 460 if (!he->srcline) 461 he->srcline = hist_entry__srcline(he); 462 } 463 464 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 465 size_t size, unsigned int width) 466 { 467 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 468 } 469 470 struct sort_entry sort_srcline = { 471 .se_header = "Source:Line", 472 .se_cmp = sort__srcline_cmp, 473 .se_collapse = sort__srcline_collapse, 474 .se_sort = sort__srcline_sort, 475 .se_init = sort__srcline_init, 476 .se_snprintf = hist_entry__srcline_snprintf, 477 .se_width_idx = HISTC_SRCLINE, 478 }; 479 480 /* --sort srcline_from */ 481 482 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 483 { 484 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym); 485 } 486 487 static int64_t 488 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 489 { 490 return left->branch_info->from.addr - right->branch_info->from.addr; 491 } 492 493 static int64_t 494 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right) 495 { 496 if (!left->branch_info->srcline_from) 497 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 498 499 if (!right->branch_info->srcline_from) 500 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 501 502 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 503 } 504 505 static int64_t 506 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right) 507 { 508 return sort__srcline_from_collapse(left, right); 509 } 510 511 static void sort__srcline_from_init(struct hist_entry *he) 512 { 513 if (!he->branch_info->srcline_from) 514 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from); 515 } 516 517 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 518 size_t size, unsigned int width) 519 { 520 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 521 } 522 523 struct sort_entry sort_srcline_from = { 524 .se_header = "From Source:Line", 525 .se_cmp = sort__srcline_from_cmp, 526 .se_collapse = sort__srcline_from_collapse, 527 .se_sort = sort__srcline_from_sort, 528 .se_init = sort__srcline_from_init, 529 .se_snprintf = hist_entry__srcline_from_snprintf, 530 .se_width_idx = HISTC_SRCLINE_FROM, 531 }; 532 533 /* --sort srcline_to */ 534 535 static int64_t 536 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 537 { 538 return left->branch_info->to.addr - right->branch_info->to.addr; 539 } 540 541 static int64_t 542 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right) 543 { 544 if (!left->branch_info->srcline_to) 545 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 546 547 if (!right->branch_info->srcline_to) 548 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 549 550 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 551 } 552 553 static int64_t 554 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right) 555 { 556 return sort__srcline_to_collapse(left, right); 557 } 558 559 static void sort__srcline_to_init(struct hist_entry *he) 560 { 561 if (!he->branch_info->srcline_to) 562 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to); 563 } 564 565 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 566 size_t size, unsigned int width) 567 { 568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 569 } 570 571 struct sort_entry sort_srcline_to = { 572 .se_header = "To Source:Line", 573 .se_cmp = sort__srcline_to_cmp, 574 .se_collapse = sort__srcline_to_collapse, 575 .se_sort = sort__srcline_to_sort, 576 .se_init = sort__srcline_to_init, 577 .se_snprintf = hist_entry__srcline_to_snprintf, 578 .se_width_idx = HISTC_SRCLINE_TO, 579 }; 580 581 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 582 size_t size, unsigned int width) 583 { 584 585 struct symbol *sym = he->ms.sym; 586 struct annotation *notes; 587 double ipc = 0.0, coverage = 0.0; 588 char tmp[64]; 589 590 if (!sym) 591 return repsep_snprintf(bf, size, "%-*s", width, "-"); 592 593 notes = symbol__annotation(sym); 594 595 if (notes->hit_cycles) 596 ipc = notes->hit_insn / ((double)notes->hit_cycles); 597 598 if (notes->total_insn) { 599 coverage = notes->cover_insn * 100.0 / 600 ((double)notes->total_insn); 601 } 602 603 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 604 return repsep_snprintf(bf, size, "%-*s", width, tmp); 605 } 606 607 struct sort_entry sort_sym_ipc = { 608 .se_header = "IPC [IPC Coverage]", 609 .se_cmp = sort__sym_cmp, 610 .se_snprintf = hist_entry__sym_ipc_snprintf, 611 .se_width_idx = HISTC_SYMBOL_IPC, 612 }; 613 614 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 615 __maybe_unused, 616 char *bf, size_t size, 617 unsigned int width) 618 { 619 char tmp[64]; 620 621 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 622 return repsep_snprintf(bf, size, "%-*s", width, tmp); 623 } 624 625 struct sort_entry sort_sym_ipc_null = { 626 .se_header = "IPC [IPC Coverage]", 627 .se_cmp = sort__sym_cmp, 628 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 629 .se_width_idx = HISTC_SYMBOL_IPC, 630 }; 631 632 /* --sort srcfile */ 633 634 static char no_srcfile[1]; 635 636 static char *hist_entry__get_srcfile(struct hist_entry *e) 637 { 638 char *sf, *p; 639 struct map *map = e->ms.map; 640 641 if (!map) 642 return no_srcfile; 643 644 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip), 645 e->ms.sym, false, true, true, e->ip); 646 if (!strcmp(sf, SRCLINE_UNKNOWN)) 647 return no_srcfile; 648 p = strchr(sf, ':'); 649 if (p && *sf) { 650 *p = 0; 651 return sf; 652 } 653 free(sf); 654 return no_srcfile; 655 } 656 657 static int64_t 658 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 659 { 660 return sort__srcline_cmp(left, right); 661 } 662 663 static int64_t 664 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right) 665 { 666 if (!left->srcfile) 667 left->srcfile = hist_entry__get_srcfile(left); 668 if (!right->srcfile) 669 right->srcfile = hist_entry__get_srcfile(right); 670 671 return strcmp(right->srcfile, left->srcfile); 672 } 673 674 static int64_t 675 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right) 676 { 677 return sort__srcfile_collapse(left, right); 678 } 679 680 static void sort__srcfile_init(struct hist_entry *he) 681 { 682 if (!he->srcfile) 683 he->srcfile = hist_entry__get_srcfile(he); 684 } 685 686 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 687 size_t size, unsigned int width) 688 { 689 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 690 } 691 692 struct sort_entry sort_srcfile = { 693 .se_header = "Source File", 694 .se_cmp = sort__srcfile_cmp, 695 .se_collapse = sort__srcfile_collapse, 696 .se_sort = sort__srcfile_sort, 697 .se_init = sort__srcfile_init, 698 .se_snprintf = hist_entry__srcfile_snprintf, 699 .se_width_idx = HISTC_SRCFILE, 700 }; 701 702 /* --sort parent */ 703 704 static int64_t 705 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 706 { 707 struct symbol *sym_l = left->parent; 708 struct symbol *sym_r = right->parent; 709 710 if (!sym_l || !sym_r) 711 return cmp_null(sym_l, sym_r); 712 713 return strcmp(sym_r->name, sym_l->name); 714 } 715 716 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 717 size_t size, unsigned int width) 718 { 719 return repsep_snprintf(bf, size, "%-*.*s", width, width, 720 he->parent ? he->parent->name : "[other]"); 721 } 722 723 struct sort_entry sort_parent = { 724 .se_header = "Parent symbol", 725 .se_cmp = sort__parent_cmp, 726 .se_snprintf = hist_entry__parent_snprintf, 727 .se_width_idx = HISTC_PARENT, 728 }; 729 730 /* --sort cpu */ 731 732 static int64_t 733 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 734 { 735 return right->cpu - left->cpu; 736 } 737 738 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 739 size_t size, unsigned int width) 740 { 741 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 742 } 743 744 struct sort_entry sort_cpu = { 745 .se_header = "CPU", 746 .se_cmp = sort__cpu_cmp, 747 .se_snprintf = hist_entry__cpu_snprintf, 748 .se_width_idx = HISTC_CPU, 749 }; 750 751 /* --sort cgroup_id */ 752 753 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 754 { 755 return (int64_t)(right_dev - left_dev); 756 } 757 758 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 759 { 760 return (int64_t)(right_ino - left_ino); 761 } 762 763 static int64_t 764 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 765 { 766 int64_t ret; 767 768 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 769 if (ret != 0) 770 return ret; 771 772 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 773 left->cgroup_id.ino); 774 } 775 776 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 777 char *bf, size_t size, 778 unsigned int width __maybe_unused) 779 { 780 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 781 he->cgroup_id.ino); 782 } 783 784 struct sort_entry sort_cgroup_id = { 785 .se_header = "cgroup id (dev/inode)", 786 .se_cmp = sort__cgroup_id_cmp, 787 .se_snprintf = hist_entry__cgroup_id_snprintf, 788 .se_width_idx = HISTC_CGROUP_ID, 789 }; 790 791 /* --sort cgroup */ 792 793 static int64_t 794 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right) 795 { 796 return right->cgroup - left->cgroup; 797 } 798 799 static int hist_entry__cgroup_snprintf(struct hist_entry *he, 800 char *bf, size_t size, 801 unsigned int width __maybe_unused) 802 { 803 const char *cgrp_name = "N/A"; 804 805 if (he->cgroup) { 806 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env, 807 he->cgroup); 808 if (cgrp != NULL) 809 cgrp_name = cgrp->name; 810 else 811 cgrp_name = "unknown"; 812 } 813 814 return repsep_snprintf(bf, size, "%s", cgrp_name); 815 } 816 817 struct sort_entry sort_cgroup = { 818 .se_header = "Cgroup", 819 .se_cmp = sort__cgroup_cmp, 820 .se_snprintf = hist_entry__cgroup_snprintf, 821 .se_width_idx = HISTC_CGROUP, 822 }; 823 824 /* --sort socket */ 825 826 static int64_t 827 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 828 { 829 return right->socket - left->socket; 830 } 831 832 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 833 size_t size, unsigned int width) 834 { 835 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 836 } 837 838 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 839 { 840 int sk = *(const int *)arg; 841 842 if (type != HIST_FILTER__SOCKET) 843 return -1; 844 845 return sk >= 0 && he->socket != sk; 846 } 847 848 struct sort_entry sort_socket = { 849 .se_header = "Socket", 850 .se_cmp = sort__socket_cmp, 851 .se_snprintf = hist_entry__socket_snprintf, 852 .se_filter = hist_entry__socket_filter, 853 .se_width_idx = HISTC_SOCKET, 854 }; 855 856 /* --sort time */ 857 858 static int64_t 859 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 860 { 861 return right->time - left->time; 862 } 863 864 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf, 865 size_t size, unsigned int width) 866 { 867 char he_time[32]; 868 869 if (symbol_conf.nanosecs) 870 timestamp__scnprintf_nsec(he->time, he_time, 871 sizeof(he_time)); 872 else 873 timestamp__scnprintf_usec(he->time, he_time, 874 sizeof(he_time)); 875 876 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 877 } 878 879 struct sort_entry sort_time = { 880 .se_header = "Time", 881 .se_cmp = sort__time_cmp, 882 .se_snprintf = hist_entry__time_snprintf, 883 .se_width_idx = HISTC_TIME, 884 }; 885 886 /* --sort trace */ 887 888 #ifdef HAVE_LIBTRACEEVENT 889 static char *get_trace_output(struct hist_entry *he) 890 { 891 struct trace_seq seq; 892 struct evsel *evsel; 893 struct tep_record rec = { 894 .data = he->raw_data, 895 .size = he->raw_size, 896 }; 897 898 evsel = hists_to_evsel(he->hists); 899 900 trace_seq_init(&seq); 901 if (symbol_conf.raw_trace) { 902 tep_print_fields(&seq, he->raw_data, he->raw_size, 903 evsel->tp_format); 904 } else { 905 tep_print_event(evsel->tp_format->tep, 906 &seq, &rec, "%s", TEP_PRINT_INFO); 907 } 908 /* 909 * Trim the buffer, it starts at 4KB and we're not going to 910 * add anything more to this buffer. 911 */ 912 return realloc(seq.buffer, seq.len + 1); 913 } 914 915 static int64_t 916 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 917 { 918 struct evsel *evsel; 919 920 evsel = hists_to_evsel(left->hists); 921 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 922 return 0; 923 924 if (left->trace_output == NULL) 925 left->trace_output = get_trace_output(left); 926 if (right->trace_output == NULL) 927 right->trace_output = get_trace_output(right); 928 929 return strcmp(right->trace_output, left->trace_output); 930 } 931 932 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 933 size_t size, unsigned int width) 934 { 935 struct evsel *evsel; 936 937 evsel = hists_to_evsel(he->hists); 938 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 939 return scnprintf(bf, size, "%-.*s", width, "N/A"); 940 941 if (he->trace_output == NULL) 942 he->trace_output = get_trace_output(he); 943 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 944 } 945 946 struct sort_entry sort_trace = { 947 .se_header = "Trace output", 948 .se_cmp = sort__trace_cmp, 949 .se_snprintf = hist_entry__trace_snprintf, 950 .se_width_idx = HISTC_TRACE, 951 }; 952 #endif /* HAVE_LIBTRACEEVENT */ 953 954 /* sort keys for branch stacks */ 955 956 static int64_t 957 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 958 { 959 if (!left->branch_info || !right->branch_info) 960 return cmp_null(left->branch_info, right->branch_info); 961 962 return _sort__dso_cmp(left->branch_info->from.ms.map, 963 right->branch_info->from.ms.map); 964 } 965 966 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 967 size_t size, unsigned int width) 968 { 969 if (he->branch_info) 970 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map, 971 bf, size, width); 972 else 973 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 974 } 975 976 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 977 const void *arg) 978 { 979 const struct dso *dso = arg; 980 981 if (type != HIST_FILTER__DSO) 982 return -1; 983 984 return dso && (!he->branch_info || !he->branch_info->from.ms.map || 985 map__dso(he->branch_info->from.ms.map) != dso); 986 } 987 988 static int64_t 989 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 990 { 991 if (!left->branch_info || !right->branch_info) 992 return cmp_null(left->branch_info, right->branch_info); 993 994 return _sort__dso_cmp(left->branch_info->to.ms.map, 995 right->branch_info->to.ms.map); 996 } 997 998 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 999 size_t size, unsigned int width) 1000 { 1001 if (he->branch_info) 1002 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map, 1003 bf, size, width); 1004 else 1005 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1006 } 1007 1008 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 1009 const void *arg) 1010 { 1011 const struct dso *dso = arg; 1012 1013 if (type != HIST_FILTER__DSO) 1014 return -1; 1015 1016 return dso && (!he->branch_info || !he->branch_info->to.ms.map || 1017 map__dso(he->branch_info->to.ms.map) != dso); 1018 } 1019 1020 static int64_t 1021 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 1022 { 1023 struct addr_map_symbol *from_l = &left->branch_info->from; 1024 struct addr_map_symbol *from_r = &right->branch_info->from; 1025 1026 if (!left->branch_info || !right->branch_info) 1027 return cmp_null(left->branch_info, right->branch_info); 1028 1029 from_l = &left->branch_info->from; 1030 from_r = &right->branch_info->from; 1031 1032 if (!from_l->ms.sym && !from_r->ms.sym) 1033 return _sort__addr_cmp(from_l->addr, from_r->addr); 1034 1035 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym); 1036 } 1037 1038 static int64_t 1039 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 1040 { 1041 struct addr_map_symbol *to_l, *to_r; 1042 1043 if (!left->branch_info || !right->branch_info) 1044 return cmp_null(left->branch_info, right->branch_info); 1045 1046 to_l = &left->branch_info->to; 1047 to_r = &right->branch_info->to; 1048 1049 if (!to_l->ms.sym && !to_r->ms.sym) 1050 return _sort__addr_cmp(to_l->addr, to_r->addr); 1051 1052 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym); 1053 } 1054 1055 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 1056 size_t size, unsigned int width) 1057 { 1058 if (he->branch_info) { 1059 struct addr_map_symbol *from = &he->branch_info->from; 1060 1061 return _hist_entry__sym_snprintf(&from->ms, from->al_addr, 1062 from->al_level, bf, size, width); 1063 } 1064 1065 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1066 } 1067 1068 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 1069 size_t size, unsigned int width) 1070 { 1071 if (he->branch_info) { 1072 struct addr_map_symbol *to = &he->branch_info->to; 1073 1074 return _hist_entry__sym_snprintf(&to->ms, to->al_addr, 1075 to->al_level, bf, size, width); 1076 } 1077 1078 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1079 } 1080 1081 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 1082 const void *arg) 1083 { 1084 const char *sym = arg; 1085 1086 if (type != HIST_FILTER__SYMBOL) 1087 return -1; 1088 1089 return sym && !(he->branch_info && he->branch_info->from.ms.sym && 1090 strstr(he->branch_info->from.ms.sym->name, sym)); 1091 } 1092 1093 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 1094 const void *arg) 1095 { 1096 const char *sym = arg; 1097 1098 if (type != HIST_FILTER__SYMBOL) 1099 return -1; 1100 1101 return sym && !(he->branch_info && he->branch_info->to.ms.sym && 1102 strstr(he->branch_info->to.ms.sym->name, sym)); 1103 } 1104 1105 struct sort_entry sort_dso_from = { 1106 .se_header = "Source Shared Object", 1107 .se_cmp = sort__dso_from_cmp, 1108 .se_snprintf = hist_entry__dso_from_snprintf, 1109 .se_filter = hist_entry__dso_from_filter, 1110 .se_width_idx = HISTC_DSO_FROM, 1111 }; 1112 1113 struct sort_entry sort_dso_to = { 1114 .se_header = "Target Shared Object", 1115 .se_cmp = sort__dso_to_cmp, 1116 .se_snprintf = hist_entry__dso_to_snprintf, 1117 .se_filter = hist_entry__dso_to_filter, 1118 .se_width_idx = HISTC_DSO_TO, 1119 }; 1120 1121 struct sort_entry sort_sym_from = { 1122 .se_header = "Source Symbol", 1123 .se_cmp = sort__sym_from_cmp, 1124 .se_snprintf = hist_entry__sym_from_snprintf, 1125 .se_filter = hist_entry__sym_from_filter, 1126 .se_width_idx = HISTC_SYMBOL_FROM, 1127 }; 1128 1129 struct sort_entry sort_sym_to = { 1130 .se_header = "Target Symbol", 1131 .se_cmp = sort__sym_to_cmp, 1132 .se_snprintf = hist_entry__sym_to_snprintf, 1133 .se_filter = hist_entry__sym_to_filter, 1134 .se_width_idx = HISTC_SYMBOL_TO, 1135 }; 1136 1137 static int _hist_entry__addr_snprintf(struct map_symbol *ms, 1138 u64 ip, char level, char *bf, size_t size, 1139 unsigned int width) 1140 { 1141 struct symbol *sym = ms->sym; 1142 struct map *map = ms->map; 1143 size_t ret = 0, offs; 1144 1145 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 1146 if (sym && map) { 1147 if (sym->type == STT_OBJECT) { 1148 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 1149 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 1150 ip - map->unmap_ip(map, sym->start)); 1151 } else { 1152 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 1153 width - ret, 1154 sym->name); 1155 offs = ip - sym->start; 1156 if (offs) 1157 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs); 1158 } 1159 } else { 1160 size_t len = BITS_PER_LONG / 4; 1161 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 1162 len, ip); 1163 } 1164 1165 return ret; 1166 } 1167 1168 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf, 1169 size_t size, unsigned int width) 1170 { 1171 if (he->branch_info) { 1172 struct addr_map_symbol *from = &he->branch_info->from; 1173 1174 return _hist_entry__addr_snprintf(&from->ms, from->al_addr, 1175 he->level, bf, size, width); 1176 } 1177 1178 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1179 } 1180 1181 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf, 1182 size_t size, unsigned int width) 1183 { 1184 if (he->branch_info) { 1185 struct addr_map_symbol *to = &he->branch_info->to; 1186 1187 return _hist_entry__addr_snprintf(&to->ms, to->al_addr, 1188 he->level, bf, size, width); 1189 } 1190 1191 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1192 } 1193 1194 static int64_t 1195 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right) 1196 { 1197 struct addr_map_symbol *from_l; 1198 struct addr_map_symbol *from_r; 1199 int64_t ret; 1200 1201 if (!left->branch_info || !right->branch_info) 1202 return cmp_null(left->branch_info, right->branch_info); 1203 1204 from_l = &left->branch_info->from; 1205 from_r = &right->branch_info->from; 1206 1207 /* 1208 * comparing symbol address alone is not enough since it's a 1209 * relative address within a dso. 1210 */ 1211 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map); 1212 if (ret != 0) 1213 return ret; 1214 1215 return _sort__addr_cmp(from_l->addr, from_r->addr); 1216 } 1217 1218 static int64_t 1219 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right) 1220 { 1221 struct addr_map_symbol *to_l; 1222 struct addr_map_symbol *to_r; 1223 int64_t ret; 1224 1225 if (!left->branch_info || !right->branch_info) 1226 return cmp_null(left->branch_info, right->branch_info); 1227 1228 to_l = &left->branch_info->to; 1229 to_r = &right->branch_info->to; 1230 1231 /* 1232 * comparing symbol address alone is not enough since it's a 1233 * relative address within a dso. 1234 */ 1235 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map); 1236 if (ret != 0) 1237 return ret; 1238 1239 return _sort__addr_cmp(to_l->addr, to_r->addr); 1240 } 1241 1242 struct sort_entry sort_addr_from = { 1243 .se_header = "Source Address", 1244 .se_cmp = sort__addr_from_cmp, 1245 .se_snprintf = hist_entry__addr_from_snprintf, 1246 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */ 1247 .se_width_idx = HISTC_ADDR_FROM, 1248 }; 1249 1250 struct sort_entry sort_addr_to = { 1251 .se_header = "Target Address", 1252 .se_cmp = sort__addr_to_cmp, 1253 .se_snprintf = hist_entry__addr_to_snprintf, 1254 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */ 1255 .se_width_idx = HISTC_ADDR_TO, 1256 }; 1257 1258 1259 static int64_t 1260 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 1261 { 1262 unsigned char mp, p; 1263 1264 if (!left->branch_info || !right->branch_info) 1265 return cmp_null(left->branch_info, right->branch_info); 1266 1267 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 1268 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 1269 return mp || p; 1270 } 1271 1272 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 1273 size_t size, unsigned int width){ 1274 static const char *out = "N/A"; 1275 1276 if (he->branch_info) { 1277 if (he->branch_info->flags.predicted) 1278 out = "N"; 1279 else if (he->branch_info->flags.mispred) 1280 out = "Y"; 1281 } 1282 1283 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 1284 } 1285 1286 static int64_t 1287 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 1288 { 1289 if (!left->branch_info || !right->branch_info) 1290 return cmp_null(left->branch_info, right->branch_info); 1291 1292 return left->branch_info->flags.cycles - 1293 right->branch_info->flags.cycles; 1294 } 1295 1296 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 1297 size_t size, unsigned int width) 1298 { 1299 if (!he->branch_info) 1300 return scnprintf(bf, size, "%-.*s", width, "N/A"); 1301 if (he->branch_info->flags.cycles == 0) 1302 return repsep_snprintf(bf, size, "%-*s", width, "-"); 1303 return repsep_snprintf(bf, size, "%-*hd", width, 1304 he->branch_info->flags.cycles); 1305 } 1306 1307 struct sort_entry sort_cycles = { 1308 .se_header = "Basic Block Cycles", 1309 .se_cmp = sort__cycles_cmp, 1310 .se_snprintf = hist_entry__cycles_snprintf, 1311 .se_width_idx = HISTC_CYCLES, 1312 }; 1313 1314 /* --sort daddr_sym */ 1315 int64_t 1316 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1317 { 1318 uint64_t l = 0, r = 0; 1319 1320 if (left->mem_info) 1321 l = left->mem_info->daddr.addr; 1322 if (right->mem_info) 1323 r = right->mem_info->daddr.addr; 1324 1325 return (int64_t)(r - l); 1326 } 1327 1328 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1329 size_t size, unsigned int width) 1330 { 1331 uint64_t addr = 0; 1332 struct map_symbol *ms = NULL; 1333 1334 if (he->mem_info) { 1335 addr = he->mem_info->daddr.addr; 1336 ms = &he->mem_info->daddr.ms; 1337 } 1338 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1339 } 1340 1341 int64_t 1342 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1343 { 1344 uint64_t l = 0, r = 0; 1345 1346 if (left->mem_info) 1347 l = left->mem_info->iaddr.addr; 1348 if (right->mem_info) 1349 r = right->mem_info->iaddr.addr; 1350 1351 return (int64_t)(r - l); 1352 } 1353 1354 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1355 size_t size, unsigned int width) 1356 { 1357 uint64_t addr = 0; 1358 struct map_symbol *ms = NULL; 1359 1360 if (he->mem_info) { 1361 addr = he->mem_info->iaddr.addr; 1362 ms = &he->mem_info->iaddr.ms; 1363 } 1364 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1365 } 1366 1367 static int64_t 1368 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1369 { 1370 struct map *map_l = NULL; 1371 struct map *map_r = NULL; 1372 1373 if (left->mem_info) 1374 map_l = left->mem_info->daddr.ms.map; 1375 if (right->mem_info) 1376 map_r = right->mem_info->daddr.ms.map; 1377 1378 return _sort__dso_cmp(map_l, map_r); 1379 } 1380 1381 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1382 size_t size, unsigned int width) 1383 { 1384 struct map *map = NULL; 1385 1386 if (he->mem_info) 1387 map = he->mem_info->daddr.ms.map; 1388 1389 return _hist_entry__dso_snprintf(map, bf, size, width); 1390 } 1391 1392 static int64_t 1393 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1394 { 1395 union perf_mem_data_src data_src_l; 1396 union perf_mem_data_src data_src_r; 1397 1398 if (left->mem_info) 1399 data_src_l = left->mem_info->data_src; 1400 else 1401 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1402 1403 if (right->mem_info) 1404 data_src_r = right->mem_info->data_src; 1405 else 1406 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1407 1408 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1409 } 1410 1411 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1412 size_t size, unsigned int width) 1413 { 1414 char out[10]; 1415 1416 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1417 return repsep_snprintf(bf, size, "%.*s", width, out); 1418 } 1419 1420 static int64_t 1421 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1422 { 1423 union perf_mem_data_src data_src_l; 1424 union perf_mem_data_src data_src_r; 1425 1426 if (left->mem_info) 1427 data_src_l = left->mem_info->data_src; 1428 else 1429 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1430 1431 if (right->mem_info) 1432 data_src_r = right->mem_info->data_src; 1433 else 1434 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1435 1436 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1437 } 1438 1439 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1440 size_t size, unsigned int width) 1441 { 1442 char out[64]; 1443 1444 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1445 return repsep_snprintf(bf, size, "%-*s", width, out); 1446 } 1447 1448 static int64_t 1449 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1450 { 1451 union perf_mem_data_src data_src_l; 1452 union perf_mem_data_src data_src_r; 1453 1454 if (left->mem_info) 1455 data_src_l = left->mem_info->data_src; 1456 else 1457 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1458 1459 if (right->mem_info) 1460 data_src_r = right->mem_info->data_src; 1461 else 1462 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1463 1464 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1465 } 1466 1467 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1468 size_t size, unsigned int width) 1469 { 1470 char out[64]; 1471 1472 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1473 return repsep_snprintf(bf, size, "%-*s", width, out); 1474 } 1475 1476 static int64_t 1477 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1478 { 1479 union perf_mem_data_src data_src_l; 1480 union perf_mem_data_src data_src_r; 1481 1482 if (left->mem_info) 1483 data_src_l = left->mem_info->data_src; 1484 else 1485 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1486 1487 if (right->mem_info) 1488 data_src_r = right->mem_info->data_src; 1489 else 1490 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1491 1492 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1493 } 1494 1495 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1496 size_t size, unsigned int width) 1497 { 1498 char out[64]; 1499 1500 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1501 return repsep_snprintf(bf, size, "%-*s", width, out); 1502 } 1503 1504 int64_t 1505 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1506 { 1507 u64 l, r; 1508 struct map *l_map, *r_map; 1509 struct dso *l_dso, *r_dso; 1510 int rc; 1511 1512 if (!left->mem_info) return -1; 1513 if (!right->mem_info) return 1; 1514 1515 /* group event types together */ 1516 if (left->cpumode > right->cpumode) return -1; 1517 if (left->cpumode < right->cpumode) return 1; 1518 1519 l_map = left->mem_info->daddr.ms.map; 1520 r_map = right->mem_info->daddr.ms.map; 1521 1522 /* if both are NULL, jump to sort on al_addr instead */ 1523 if (!l_map && !r_map) 1524 goto addr; 1525 1526 if (!l_map) return -1; 1527 if (!r_map) return 1; 1528 1529 l_dso = map__dso(l_map); 1530 r_dso = map__dso(r_map); 1531 rc = dso__cmp_id(l_dso, r_dso); 1532 if (rc) 1533 return rc; 1534 /* 1535 * Addresses with no major/minor numbers are assumed to be 1536 * anonymous in userspace. Sort those on pid then address. 1537 * 1538 * The kernel and non-zero major/minor mapped areas are 1539 * assumed to be unity mapped. Sort those on address. 1540 */ 1541 1542 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1543 (!(l_map->flags & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min && 1544 !l_dso->id.ino && !l_dso->id.ino_generation) { 1545 /* userspace anonymous */ 1546 1547 if (left->thread->pid_ > right->thread->pid_) return -1; 1548 if (left->thread->pid_ < right->thread->pid_) return 1; 1549 } 1550 1551 addr: 1552 /* al_addr does all the right addr - start + offset calculations */ 1553 l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl); 1554 r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl); 1555 1556 if (l > r) return -1; 1557 if (l < r) return 1; 1558 1559 return 0; 1560 } 1561 1562 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1563 size_t size, unsigned int width) 1564 { 1565 1566 uint64_t addr = 0; 1567 struct map_symbol *ms = NULL; 1568 char level = he->level; 1569 1570 if (he->mem_info) { 1571 struct map *map = he->mem_info->daddr.ms.map; 1572 struct dso *dso = map__dso(map); 1573 1574 addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl); 1575 ms = &he->mem_info->daddr.ms; 1576 1577 /* print [s] for shared data mmaps */ 1578 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1579 map && !(map->prot & PROT_EXEC) && 1580 (map->flags & MAP_SHARED) && 1581 (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation)) 1582 level = 's'; 1583 else if (!map) 1584 level = 'X'; 1585 } 1586 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width); 1587 } 1588 1589 struct sort_entry sort_mispredict = { 1590 .se_header = "Branch Mispredicted", 1591 .se_cmp = sort__mispredict_cmp, 1592 .se_snprintf = hist_entry__mispredict_snprintf, 1593 .se_width_idx = HISTC_MISPREDICT, 1594 }; 1595 1596 static int64_t 1597 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right) 1598 { 1599 return left->weight - right->weight; 1600 } 1601 1602 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1603 size_t size, unsigned int width) 1604 { 1605 return repsep_snprintf(bf, size, "%-*llu", width, he->weight); 1606 } 1607 1608 struct sort_entry sort_local_weight = { 1609 .se_header = "Local Weight", 1610 .se_cmp = sort__weight_cmp, 1611 .se_snprintf = hist_entry__local_weight_snprintf, 1612 .se_width_idx = HISTC_LOCAL_WEIGHT, 1613 }; 1614 1615 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1616 size_t size, unsigned int width) 1617 { 1618 return repsep_snprintf(bf, size, "%-*llu", width, 1619 he->weight * he->stat.nr_events); 1620 } 1621 1622 struct sort_entry sort_global_weight = { 1623 .se_header = "Weight", 1624 .se_cmp = sort__weight_cmp, 1625 .se_snprintf = hist_entry__global_weight_snprintf, 1626 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1627 }; 1628 1629 static int64_t 1630 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) 1631 { 1632 return left->ins_lat - right->ins_lat; 1633 } 1634 1635 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, 1636 size_t size, unsigned int width) 1637 { 1638 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat); 1639 } 1640 1641 struct sort_entry sort_local_ins_lat = { 1642 .se_header = "Local INSTR Latency", 1643 .se_cmp = sort__ins_lat_cmp, 1644 .se_snprintf = hist_entry__local_ins_lat_snprintf, 1645 .se_width_idx = HISTC_LOCAL_INS_LAT, 1646 }; 1647 1648 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, 1649 size_t size, unsigned int width) 1650 { 1651 return repsep_snprintf(bf, size, "%-*u", width, 1652 he->ins_lat * he->stat.nr_events); 1653 } 1654 1655 struct sort_entry sort_global_ins_lat = { 1656 .se_header = "INSTR Latency", 1657 .se_cmp = sort__ins_lat_cmp, 1658 .se_snprintf = hist_entry__global_ins_lat_snprintf, 1659 .se_width_idx = HISTC_GLOBAL_INS_LAT, 1660 }; 1661 1662 static int64_t 1663 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) 1664 { 1665 return left->p_stage_cyc - right->p_stage_cyc; 1666 } 1667 1668 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1669 size_t size, unsigned int width) 1670 { 1671 return repsep_snprintf(bf, size, "%-*u", width, 1672 he->p_stage_cyc * he->stat.nr_events); 1673 } 1674 1675 1676 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1677 size_t size, unsigned int width) 1678 { 1679 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); 1680 } 1681 1682 struct sort_entry sort_local_p_stage_cyc = { 1683 .se_header = "Local Pipeline Stage Cycle", 1684 .se_cmp = sort__p_stage_cyc_cmp, 1685 .se_snprintf = hist_entry__p_stage_cyc_snprintf, 1686 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC, 1687 }; 1688 1689 struct sort_entry sort_global_p_stage_cyc = { 1690 .se_header = "Pipeline Stage Cycle", 1691 .se_cmp = sort__p_stage_cyc_cmp, 1692 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf, 1693 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC, 1694 }; 1695 1696 struct sort_entry sort_mem_daddr_sym = { 1697 .se_header = "Data Symbol", 1698 .se_cmp = sort__daddr_cmp, 1699 .se_snprintf = hist_entry__daddr_snprintf, 1700 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1701 }; 1702 1703 struct sort_entry sort_mem_iaddr_sym = { 1704 .se_header = "Code Symbol", 1705 .se_cmp = sort__iaddr_cmp, 1706 .se_snprintf = hist_entry__iaddr_snprintf, 1707 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1708 }; 1709 1710 struct sort_entry sort_mem_daddr_dso = { 1711 .se_header = "Data Object", 1712 .se_cmp = sort__dso_daddr_cmp, 1713 .se_snprintf = hist_entry__dso_daddr_snprintf, 1714 .se_width_idx = HISTC_MEM_DADDR_DSO, 1715 }; 1716 1717 struct sort_entry sort_mem_locked = { 1718 .se_header = "Locked", 1719 .se_cmp = sort__locked_cmp, 1720 .se_snprintf = hist_entry__locked_snprintf, 1721 .se_width_idx = HISTC_MEM_LOCKED, 1722 }; 1723 1724 struct sort_entry sort_mem_tlb = { 1725 .se_header = "TLB access", 1726 .se_cmp = sort__tlb_cmp, 1727 .se_snprintf = hist_entry__tlb_snprintf, 1728 .se_width_idx = HISTC_MEM_TLB, 1729 }; 1730 1731 struct sort_entry sort_mem_lvl = { 1732 .se_header = "Memory access", 1733 .se_cmp = sort__lvl_cmp, 1734 .se_snprintf = hist_entry__lvl_snprintf, 1735 .se_width_idx = HISTC_MEM_LVL, 1736 }; 1737 1738 struct sort_entry sort_mem_snoop = { 1739 .se_header = "Snoop", 1740 .se_cmp = sort__snoop_cmp, 1741 .se_snprintf = hist_entry__snoop_snprintf, 1742 .se_width_idx = HISTC_MEM_SNOOP, 1743 }; 1744 1745 struct sort_entry sort_mem_dcacheline = { 1746 .se_header = "Data Cacheline", 1747 .se_cmp = sort__dcacheline_cmp, 1748 .se_snprintf = hist_entry__dcacheline_snprintf, 1749 .se_width_idx = HISTC_MEM_DCACHELINE, 1750 }; 1751 1752 static int64_t 1753 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) 1754 { 1755 union perf_mem_data_src data_src_l; 1756 union perf_mem_data_src data_src_r; 1757 1758 if (left->mem_info) 1759 data_src_l = left->mem_info->data_src; 1760 else 1761 data_src_l.mem_blk = PERF_MEM_BLK_NA; 1762 1763 if (right->mem_info) 1764 data_src_r = right->mem_info->data_src; 1765 else 1766 data_src_r.mem_blk = PERF_MEM_BLK_NA; 1767 1768 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk); 1769 } 1770 1771 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf, 1772 size_t size, unsigned int width) 1773 { 1774 char out[16]; 1775 1776 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info); 1777 return repsep_snprintf(bf, size, "%.*s", width, out); 1778 } 1779 1780 struct sort_entry sort_mem_blocked = { 1781 .se_header = "Blocked", 1782 .se_cmp = sort__blocked_cmp, 1783 .se_snprintf = hist_entry__blocked_snprintf, 1784 .se_width_idx = HISTC_MEM_BLOCKED, 1785 }; 1786 1787 static int64_t 1788 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1789 { 1790 uint64_t l = 0, r = 0; 1791 1792 if (left->mem_info) 1793 l = left->mem_info->daddr.phys_addr; 1794 if (right->mem_info) 1795 r = right->mem_info->daddr.phys_addr; 1796 1797 return (int64_t)(r - l); 1798 } 1799 1800 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1801 size_t size, unsigned int width) 1802 { 1803 uint64_t addr = 0; 1804 size_t ret = 0; 1805 size_t len = BITS_PER_LONG / 4; 1806 1807 addr = he->mem_info->daddr.phys_addr; 1808 1809 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1810 1811 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1812 1813 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1814 1815 if (ret > width) 1816 bf[width] = '\0'; 1817 1818 return width; 1819 } 1820 1821 struct sort_entry sort_mem_phys_daddr = { 1822 .se_header = "Data Physical Address", 1823 .se_cmp = sort__phys_daddr_cmp, 1824 .se_snprintf = hist_entry__phys_daddr_snprintf, 1825 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1826 }; 1827 1828 static int64_t 1829 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1830 { 1831 uint64_t l = 0, r = 0; 1832 1833 if (left->mem_info) 1834 l = left->mem_info->daddr.data_page_size; 1835 if (right->mem_info) 1836 r = right->mem_info->daddr.data_page_size; 1837 1838 return (int64_t)(r - l); 1839 } 1840 1841 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf, 1842 size_t size, unsigned int width) 1843 { 1844 char str[PAGE_SIZE_NAME_LEN]; 1845 1846 return repsep_snprintf(bf, size, "%-*s", width, 1847 get_page_size_name(he->mem_info->daddr.data_page_size, str)); 1848 } 1849 1850 struct sort_entry sort_mem_data_page_size = { 1851 .se_header = "Data Page Size", 1852 .se_cmp = sort__data_page_size_cmp, 1853 .se_snprintf = hist_entry__data_page_size_snprintf, 1854 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE, 1855 }; 1856 1857 static int64_t 1858 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1859 { 1860 uint64_t l = left->code_page_size; 1861 uint64_t r = right->code_page_size; 1862 1863 return (int64_t)(r - l); 1864 } 1865 1866 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf, 1867 size_t size, unsigned int width) 1868 { 1869 char str[PAGE_SIZE_NAME_LEN]; 1870 1871 return repsep_snprintf(bf, size, "%-*s", width, 1872 get_page_size_name(he->code_page_size, str)); 1873 } 1874 1875 struct sort_entry sort_code_page_size = { 1876 .se_header = "Code Page Size", 1877 .se_cmp = sort__code_page_size_cmp, 1878 .se_snprintf = hist_entry__code_page_size_snprintf, 1879 .se_width_idx = HISTC_CODE_PAGE_SIZE, 1880 }; 1881 1882 static int64_t 1883 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1884 { 1885 if (!left->branch_info || !right->branch_info) 1886 return cmp_null(left->branch_info, right->branch_info); 1887 1888 return left->branch_info->flags.abort != 1889 right->branch_info->flags.abort; 1890 } 1891 1892 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1893 size_t size, unsigned int width) 1894 { 1895 static const char *out = "N/A"; 1896 1897 if (he->branch_info) { 1898 if (he->branch_info->flags.abort) 1899 out = "A"; 1900 else 1901 out = "."; 1902 } 1903 1904 return repsep_snprintf(bf, size, "%-*s", width, out); 1905 } 1906 1907 struct sort_entry sort_abort = { 1908 .se_header = "Transaction abort", 1909 .se_cmp = sort__abort_cmp, 1910 .se_snprintf = hist_entry__abort_snprintf, 1911 .se_width_idx = HISTC_ABORT, 1912 }; 1913 1914 static int64_t 1915 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1916 { 1917 if (!left->branch_info || !right->branch_info) 1918 return cmp_null(left->branch_info, right->branch_info); 1919 1920 return left->branch_info->flags.in_tx != 1921 right->branch_info->flags.in_tx; 1922 } 1923 1924 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1925 size_t size, unsigned int width) 1926 { 1927 static const char *out = "N/A"; 1928 1929 if (he->branch_info) { 1930 if (he->branch_info->flags.in_tx) 1931 out = "T"; 1932 else 1933 out = "."; 1934 } 1935 1936 return repsep_snprintf(bf, size, "%-*s", width, out); 1937 } 1938 1939 struct sort_entry sort_in_tx = { 1940 .se_header = "Branch in transaction", 1941 .se_cmp = sort__in_tx_cmp, 1942 .se_snprintf = hist_entry__in_tx_snprintf, 1943 .se_width_idx = HISTC_IN_TX, 1944 }; 1945 1946 static int64_t 1947 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1948 { 1949 return left->transaction - right->transaction; 1950 } 1951 1952 static inline char *add_str(char *p, const char *str) 1953 { 1954 strcpy(p, str); 1955 return p + strlen(str); 1956 } 1957 1958 static struct txbit { 1959 unsigned flag; 1960 const char *name; 1961 int skip_for_len; 1962 } txbits[] = { 1963 { PERF_TXN_ELISION, "EL ", 0 }, 1964 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1965 { PERF_TXN_SYNC, "SYNC ", 1 }, 1966 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1967 { PERF_TXN_RETRY, "RETRY ", 0 }, 1968 { PERF_TXN_CONFLICT, "CON ", 0 }, 1969 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1970 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1971 { 0, NULL, 0 } 1972 }; 1973 1974 int hist_entry__transaction_len(void) 1975 { 1976 int i; 1977 int len = 0; 1978 1979 for (i = 0; txbits[i].name; i++) { 1980 if (!txbits[i].skip_for_len) 1981 len += strlen(txbits[i].name); 1982 } 1983 len += 4; /* :XX<space> */ 1984 return len; 1985 } 1986 1987 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1988 size_t size, unsigned int width) 1989 { 1990 u64 t = he->transaction; 1991 char buf[128]; 1992 char *p = buf; 1993 int i; 1994 1995 buf[0] = 0; 1996 for (i = 0; txbits[i].name; i++) 1997 if (txbits[i].flag & t) 1998 p = add_str(p, txbits[i].name); 1999 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 2000 p = add_str(p, "NEITHER "); 2001 if (t & PERF_TXN_ABORT_MASK) { 2002 sprintf(p, ":%" PRIx64, 2003 (t & PERF_TXN_ABORT_MASK) >> 2004 PERF_TXN_ABORT_SHIFT); 2005 p += strlen(p); 2006 } 2007 2008 return repsep_snprintf(bf, size, "%-*s", width, buf); 2009 } 2010 2011 struct sort_entry sort_transaction = { 2012 .se_header = "Transaction ", 2013 .se_cmp = sort__transaction_cmp, 2014 .se_snprintf = hist_entry__transaction_snprintf, 2015 .se_width_idx = HISTC_TRANSACTION, 2016 }; 2017 2018 /* --sort symbol_size */ 2019 2020 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 2021 { 2022 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 2023 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 2024 2025 return size_l < size_r ? -1 : 2026 size_l == size_r ? 0 : 1; 2027 } 2028 2029 static int64_t 2030 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 2031 { 2032 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 2033 } 2034 2035 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 2036 size_t bf_size, unsigned int width) 2037 { 2038 if (sym) 2039 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 2040 2041 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2042 } 2043 2044 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 2045 size_t size, unsigned int width) 2046 { 2047 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 2048 } 2049 2050 struct sort_entry sort_sym_size = { 2051 .se_header = "Symbol size", 2052 .se_cmp = sort__sym_size_cmp, 2053 .se_snprintf = hist_entry__sym_size_snprintf, 2054 .se_width_idx = HISTC_SYM_SIZE, 2055 }; 2056 2057 /* --sort dso_size */ 2058 2059 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 2060 { 2061 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 2062 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 2063 2064 return size_l < size_r ? -1 : 2065 size_l == size_r ? 0 : 1; 2066 } 2067 2068 static int64_t 2069 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 2070 { 2071 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 2072 } 2073 2074 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 2075 size_t bf_size, unsigned int width) 2076 { 2077 if (map && map__dso(map)) 2078 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map)); 2079 2080 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 2081 } 2082 2083 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 2084 size_t size, unsigned int width) 2085 { 2086 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 2087 } 2088 2089 struct sort_entry sort_dso_size = { 2090 .se_header = "DSO size", 2091 .se_cmp = sort__dso_size_cmp, 2092 .se_snprintf = hist_entry__dso_size_snprintf, 2093 .se_width_idx = HISTC_DSO_SIZE, 2094 }; 2095 2096 /* --sort dso_size */ 2097 2098 static int64_t 2099 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right) 2100 { 2101 u64 left_ip = left->ip; 2102 u64 right_ip = right->ip; 2103 struct map *left_map = left->ms.map; 2104 struct map *right_map = right->ms.map; 2105 2106 if (left_map) 2107 left_ip = left_map->unmap_ip(left_map, left_ip); 2108 if (right_map) 2109 right_ip = right_map->unmap_ip(right_map, right_ip); 2110 2111 return _sort__addr_cmp(left_ip, right_ip); 2112 } 2113 2114 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf, 2115 size_t size, unsigned int width) 2116 { 2117 u64 ip = he->ip; 2118 struct map *map = he->ms.map; 2119 2120 if (map) 2121 ip = map->unmap_ip(map, ip); 2122 2123 return repsep_snprintf(bf, size, "%-#*llx", width, ip); 2124 } 2125 2126 struct sort_entry sort_addr = { 2127 .se_header = "Address", 2128 .se_cmp = sort__addr_cmp, 2129 .se_snprintf = hist_entry__addr_snprintf, 2130 .se_width_idx = HISTC_ADDR, 2131 }; 2132 2133 2134 struct sort_dimension { 2135 const char *name; 2136 struct sort_entry *entry; 2137 int taken; 2138 }; 2139 2140 int __weak arch_support_sort_key(const char *sort_key __maybe_unused) 2141 { 2142 return 0; 2143 } 2144 2145 const char * __weak arch_perf_header_entry(const char *se_header) 2146 { 2147 return se_header; 2148 } 2149 2150 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd) 2151 { 2152 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header); 2153 } 2154 2155 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 2156 2157 static struct sort_dimension common_sort_dimensions[] = { 2158 DIM(SORT_PID, "pid", sort_thread), 2159 DIM(SORT_COMM, "comm", sort_comm), 2160 DIM(SORT_DSO, "dso", sort_dso), 2161 DIM(SORT_SYM, "symbol", sort_sym), 2162 DIM(SORT_PARENT, "parent", sort_parent), 2163 DIM(SORT_CPU, "cpu", sort_cpu), 2164 DIM(SORT_SOCKET, "socket", sort_socket), 2165 DIM(SORT_SRCLINE, "srcline", sort_srcline), 2166 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 2167 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 2168 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 2169 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 2170 #ifdef HAVE_LIBTRACEEVENT 2171 DIM(SORT_TRACE, "trace", sort_trace), 2172 #endif 2173 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 2174 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 2175 DIM(SORT_CGROUP, "cgroup", sort_cgroup), 2176 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 2177 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 2178 DIM(SORT_TIME, "time", sort_time), 2179 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size), 2180 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat), 2181 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat), 2182 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc), 2183 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc), 2184 DIM(SORT_ADDR, "addr", sort_addr), 2185 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc), 2186 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc), 2187 DIM(SORT_SIMD, "simd", sort_simd) 2188 }; 2189 2190 #undef DIM 2191 2192 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 2193 2194 static struct sort_dimension bstack_sort_dimensions[] = { 2195 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 2196 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 2197 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 2198 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 2199 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 2200 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 2201 DIM(SORT_ABORT, "abort", sort_abort), 2202 DIM(SORT_CYCLES, "cycles", sort_cycles), 2203 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 2204 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 2205 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 2206 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from), 2207 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to), 2208 }; 2209 2210 #undef DIM 2211 2212 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 2213 2214 static struct sort_dimension memory_sort_dimensions[] = { 2215 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 2216 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 2217 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 2218 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 2219 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 2220 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 2221 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 2222 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 2223 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 2224 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size), 2225 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked), 2226 }; 2227 2228 #undef DIM 2229 2230 struct hpp_dimension { 2231 const char *name; 2232 struct perf_hpp_fmt *fmt; 2233 int taken; 2234 }; 2235 2236 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 2237 2238 static struct hpp_dimension hpp_sort_dimensions[] = { 2239 DIM(PERF_HPP__OVERHEAD, "overhead"), 2240 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 2241 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 2242 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 2243 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 2244 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 2245 DIM(PERF_HPP__SAMPLES, "sample"), 2246 DIM(PERF_HPP__PERIOD, "period"), 2247 }; 2248 2249 #undef DIM 2250 2251 struct hpp_sort_entry { 2252 struct perf_hpp_fmt hpp; 2253 struct sort_entry *se; 2254 }; 2255 2256 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 2257 { 2258 struct hpp_sort_entry *hse; 2259 2260 if (!perf_hpp__is_sort_entry(fmt)) 2261 return; 2262 2263 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2264 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 2265 } 2266 2267 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2268 struct hists *hists, int line __maybe_unused, 2269 int *span __maybe_unused) 2270 { 2271 struct hpp_sort_entry *hse; 2272 size_t len = fmt->user_len; 2273 2274 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2275 2276 if (!len) 2277 len = hists__col_len(hists, hse->se->se_width_idx); 2278 2279 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 2280 } 2281 2282 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 2283 struct perf_hpp *hpp __maybe_unused, 2284 struct hists *hists) 2285 { 2286 struct hpp_sort_entry *hse; 2287 size_t len = fmt->user_len; 2288 2289 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2290 2291 if (!len) 2292 len = hists__col_len(hists, hse->se->se_width_idx); 2293 2294 return len; 2295 } 2296 2297 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2298 struct hist_entry *he) 2299 { 2300 struct hpp_sort_entry *hse; 2301 size_t len = fmt->user_len; 2302 2303 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2304 2305 if (!len) 2306 len = hists__col_len(he->hists, hse->se->se_width_idx); 2307 2308 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 2309 } 2310 2311 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 2312 struct hist_entry *a, struct hist_entry *b) 2313 { 2314 struct hpp_sort_entry *hse; 2315 2316 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2317 return hse->se->se_cmp(a, b); 2318 } 2319 2320 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 2321 struct hist_entry *a, struct hist_entry *b) 2322 { 2323 struct hpp_sort_entry *hse; 2324 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 2325 2326 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2327 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 2328 return collapse_fn(a, b); 2329 } 2330 2331 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 2332 struct hist_entry *a, struct hist_entry *b) 2333 { 2334 struct hpp_sort_entry *hse; 2335 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 2336 2337 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2338 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 2339 return sort_fn(a, b); 2340 } 2341 2342 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 2343 { 2344 return format->header == __sort__hpp_header; 2345 } 2346 2347 #define MK_SORT_ENTRY_CHK(key) \ 2348 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 2349 { \ 2350 struct hpp_sort_entry *hse; \ 2351 \ 2352 if (!perf_hpp__is_sort_entry(fmt)) \ 2353 return false; \ 2354 \ 2355 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 2356 return hse->se == &sort_ ## key ; \ 2357 } 2358 2359 #ifdef HAVE_LIBTRACEEVENT 2360 MK_SORT_ENTRY_CHK(trace) 2361 #else 2362 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2363 { 2364 return false; 2365 } 2366 #endif 2367 MK_SORT_ENTRY_CHK(srcline) 2368 MK_SORT_ENTRY_CHK(srcfile) 2369 MK_SORT_ENTRY_CHK(thread) 2370 MK_SORT_ENTRY_CHK(comm) 2371 MK_SORT_ENTRY_CHK(dso) 2372 MK_SORT_ENTRY_CHK(sym) 2373 2374 2375 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2376 { 2377 struct hpp_sort_entry *hse_a; 2378 struct hpp_sort_entry *hse_b; 2379 2380 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 2381 return false; 2382 2383 hse_a = container_of(a, struct hpp_sort_entry, hpp); 2384 hse_b = container_of(b, struct hpp_sort_entry, hpp); 2385 2386 return hse_a->se == hse_b->se; 2387 } 2388 2389 static void hse_free(struct perf_hpp_fmt *fmt) 2390 { 2391 struct hpp_sort_entry *hse; 2392 2393 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2394 free(hse); 2395 } 2396 2397 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2398 { 2399 struct hpp_sort_entry *hse; 2400 2401 if (!perf_hpp__is_sort_entry(fmt)) 2402 return; 2403 2404 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2405 2406 if (hse->se->se_init) 2407 hse->se->se_init(he); 2408 } 2409 2410 static struct hpp_sort_entry * 2411 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 2412 { 2413 struct hpp_sort_entry *hse; 2414 2415 hse = malloc(sizeof(*hse)); 2416 if (hse == NULL) { 2417 pr_err("Memory allocation failed\n"); 2418 return NULL; 2419 } 2420 2421 hse->se = sd->entry; 2422 hse->hpp.name = sd->entry->se_header; 2423 hse->hpp.header = __sort__hpp_header; 2424 hse->hpp.width = __sort__hpp_width; 2425 hse->hpp.entry = __sort__hpp_entry; 2426 hse->hpp.color = NULL; 2427 2428 hse->hpp.cmp = __sort__hpp_cmp; 2429 hse->hpp.collapse = __sort__hpp_collapse; 2430 hse->hpp.sort = __sort__hpp_sort; 2431 hse->hpp.equal = __sort__hpp_equal; 2432 hse->hpp.free = hse_free; 2433 hse->hpp.init = hse_init; 2434 2435 INIT_LIST_HEAD(&hse->hpp.list); 2436 INIT_LIST_HEAD(&hse->hpp.sort_list); 2437 hse->hpp.elide = false; 2438 hse->hpp.len = 0; 2439 hse->hpp.user_len = 0; 2440 hse->hpp.level = level; 2441 2442 return hse; 2443 } 2444 2445 static void hpp_free(struct perf_hpp_fmt *fmt) 2446 { 2447 free(fmt); 2448 } 2449 2450 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 2451 int level) 2452 { 2453 struct perf_hpp_fmt *fmt; 2454 2455 fmt = memdup(hd->fmt, sizeof(*fmt)); 2456 if (fmt) { 2457 INIT_LIST_HEAD(&fmt->list); 2458 INIT_LIST_HEAD(&fmt->sort_list); 2459 fmt->free = hpp_free; 2460 fmt->level = level; 2461 } 2462 2463 return fmt; 2464 } 2465 2466 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 2467 { 2468 struct perf_hpp_fmt *fmt; 2469 struct hpp_sort_entry *hse; 2470 int ret = -1; 2471 int r; 2472 2473 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 2474 if (!perf_hpp__is_sort_entry(fmt)) 2475 continue; 2476 2477 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2478 if (hse->se->se_filter == NULL) 2479 continue; 2480 2481 /* 2482 * hist entry is filtered if any of sort key in the hpp list 2483 * is applied. But it should skip non-matched filter types. 2484 */ 2485 r = hse->se->se_filter(he, type, arg); 2486 if (r >= 0) { 2487 if (ret < 0) 2488 ret = 0; 2489 ret |= r; 2490 } 2491 } 2492 2493 return ret; 2494 } 2495 2496 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 2497 struct perf_hpp_list *list, 2498 int level) 2499 { 2500 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 2501 2502 if (hse == NULL) 2503 return -1; 2504 2505 perf_hpp_list__register_sort_field(list, &hse->hpp); 2506 return 0; 2507 } 2508 2509 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 2510 struct perf_hpp_list *list) 2511 { 2512 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 2513 2514 if (hse == NULL) 2515 return -1; 2516 2517 perf_hpp_list__column_register(list, &hse->hpp); 2518 return 0; 2519 } 2520 2521 #ifndef HAVE_LIBTRACEEVENT 2522 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2523 { 2524 return false; 2525 } 2526 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused, 2527 struct hists *hists __maybe_unused) 2528 { 2529 return false; 2530 } 2531 #else 2532 struct hpp_dynamic_entry { 2533 struct perf_hpp_fmt hpp; 2534 struct evsel *evsel; 2535 struct tep_format_field *field; 2536 unsigned dynamic_len; 2537 bool raw_trace; 2538 }; 2539 2540 static int hde_width(struct hpp_dynamic_entry *hde) 2541 { 2542 if (!hde->hpp.len) { 2543 int len = hde->dynamic_len; 2544 int namelen = strlen(hde->field->name); 2545 int fieldlen = hde->field->size; 2546 2547 if (namelen > len) 2548 len = namelen; 2549 2550 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2551 /* length for print hex numbers */ 2552 fieldlen = hde->field->size * 2 + 2; 2553 } 2554 if (fieldlen > len) 2555 len = fieldlen; 2556 2557 hde->hpp.len = len; 2558 } 2559 return hde->hpp.len; 2560 } 2561 2562 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2563 struct hist_entry *he) 2564 { 2565 char *str, *pos; 2566 struct tep_format_field *field = hde->field; 2567 size_t namelen; 2568 bool last = false; 2569 2570 if (hde->raw_trace) 2571 return; 2572 2573 /* parse pretty print result and update max length */ 2574 if (!he->trace_output) 2575 he->trace_output = get_trace_output(he); 2576 2577 namelen = strlen(field->name); 2578 str = he->trace_output; 2579 2580 while (str) { 2581 pos = strchr(str, ' '); 2582 if (pos == NULL) { 2583 last = true; 2584 pos = str + strlen(str); 2585 } 2586 2587 if (!strncmp(str, field->name, namelen)) { 2588 size_t len; 2589 2590 str += namelen + 1; 2591 len = pos - str; 2592 2593 if (len > hde->dynamic_len) 2594 hde->dynamic_len = len; 2595 break; 2596 } 2597 2598 if (last) 2599 str = NULL; 2600 else 2601 str = pos + 1; 2602 } 2603 } 2604 2605 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2606 struct hists *hists __maybe_unused, 2607 int line __maybe_unused, 2608 int *span __maybe_unused) 2609 { 2610 struct hpp_dynamic_entry *hde; 2611 size_t len = fmt->user_len; 2612 2613 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2614 2615 if (!len) 2616 len = hde_width(hde); 2617 2618 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2619 } 2620 2621 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2622 struct perf_hpp *hpp __maybe_unused, 2623 struct hists *hists __maybe_unused) 2624 { 2625 struct hpp_dynamic_entry *hde; 2626 size_t len = fmt->user_len; 2627 2628 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2629 2630 if (!len) 2631 len = hde_width(hde); 2632 2633 return len; 2634 } 2635 2636 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2637 { 2638 struct hpp_dynamic_entry *hde; 2639 2640 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2641 2642 return hists_to_evsel(hists) == hde->evsel; 2643 } 2644 2645 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2646 struct hist_entry *he) 2647 { 2648 struct hpp_dynamic_entry *hde; 2649 size_t len = fmt->user_len; 2650 char *str, *pos; 2651 struct tep_format_field *field; 2652 size_t namelen; 2653 bool last = false; 2654 int ret; 2655 2656 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2657 2658 if (!len) 2659 len = hde_width(hde); 2660 2661 if (hde->raw_trace) 2662 goto raw_field; 2663 2664 if (!he->trace_output) 2665 he->trace_output = get_trace_output(he); 2666 2667 field = hde->field; 2668 namelen = strlen(field->name); 2669 str = he->trace_output; 2670 2671 while (str) { 2672 pos = strchr(str, ' '); 2673 if (pos == NULL) { 2674 last = true; 2675 pos = str + strlen(str); 2676 } 2677 2678 if (!strncmp(str, field->name, namelen)) { 2679 str += namelen + 1; 2680 str = strndup(str, pos - str); 2681 2682 if (str == NULL) 2683 return scnprintf(hpp->buf, hpp->size, 2684 "%*.*s", len, len, "ERROR"); 2685 break; 2686 } 2687 2688 if (last) 2689 str = NULL; 2690 else 2691 str = pos + 1; 2692 } 2693 2694 if (str == NULL) { 2695 struct trace_seq seq; 2696 raw_field: 2697 trace_seq_init(&seq); 2698 tep_print_field(&seq, he->raw_data, hde->field); 2699 str = seq.buffer; 2700 } 2701 2702 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2703 free(str); 2704 return ret; 2705 } 2706 2707 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2708 struct hist_entry *a, struct hist_entry *b) 2709 { 2710 struct hpp_dynamic_entry *hde; 2711 struct tep_format_field *field; 2712 unsigned offset, size; 2713 2714 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2715 2716 field = hde->field; 2717 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2718 unsigned long long dyn; 2719 2720 tep_read_number_field(field, a->raw_data, &dyn); 2721 offset = dyn & 0xffff; 2722 size = (dyn >> 16) & 0xffff; 2723 if (tep_field_is_relative(field->flags)) 2724 offset += field->offset + field->size; 2725 /* record max width for output */ 2726 if (size > hde->dynamic_len) 2727 hde->dynamic_len = size; 2728 } else { 2729 offset = field->offset; 2730 size = field->size; 2731 } 2732 2733 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2734 } 2735 2736 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2737 { 2738 return fmt->cmp == __sort__hde_cmp; 2739 } 2740 2741 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2742 { 2743 struct hpp_dynamic_entry *hde_a; 2744 struct hpp_dynamic_entry *hde_b; 2745 2746 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2747 return false; 2748 2749 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2750 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2751 2752 return hde_a->field == hde_b->field; 2753 } 2754 2755 static void hde_free(struct perf_hpp_fmt *fmt) 2756 { 2757 struct hpp_dynamic_entry *hde; 2758 2759 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2760 free(hde); 2761 } 2762 2763 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2764 { 2765 struct hpp_dynamic_entry *hde; 2766 2767 if (!perf_hpp__is_dynamic_entry(fmt)) 2768 return; 2769 2770 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2771 update_dynamic_len(hde, he); 2772 } 2773 2774 static struct hpp_dynamic_entry * 2775 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2776 int level) 2777 { 2778 struct hpp_dynamic_entry *hde; 2779 2780 hde = malloc(sizeof(*hde)); 2781 if (hde == NULL) { 2782 pr_debug("Memory allocation failed\n"); 2783 return NULL; 2784 } 2785 2786 hde->evsel = evsel; 2787 hde->field = field; 2788 hde->dynamic_len = 0; 2789 2790 hde->hpp.name = field->name; 2791 hde->hpp.header = __sort__hde_header; 2792 hde->hpp.width = __sort__hde_width; 2793 hde->hpp.entry = __sort__hde_entry; 2794 hde->hpp.color = NULL; 2795 2796 hde->hpp.init = __sort__hde_init; 2797 hde->hpp.cmp = __sort__hde_cmp; 2798 hde->hpp.collapse = __sort__hde_cmp; 2799 hde->hpp.sort = __sort__hde_cmp; 2800 hde->hpp.equal = __sort__hde_equal; 2801 hde->hpp.free = hde_free; 2802 2803 INIT_LIST_HEAD(&hde->hpp.list); 2804 INIT_LIST_HEAD(&hde->hpp.sort_list); 2805 hde->hpp.elide = false; 2806 hde->hpp.len = 0; 2807 hde->hpp.user_len = 0; 2808 hde->hpp.level = level; 2809 2810 return hde; 2811 } 2812 #endif /* HAVE_LIBTRACEEVENT */ 2813 2814 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2815 { 2816 struct perf_hpp_fmt *new_fmt = NULL; 2817 2818 if (perf_hpp__is_sort_entry(fmt)) { 2819 struct hpp_sort_entry *hse, *new_hse; 2820 2821 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2822 new_hse = memdup(hse, sizeof(*hse)); 2823 if (new_hse) 2824 new_fmt = &new_hse->hpp; 2825 #ifdef HAVE_LIBTRACEEVENT 2826 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2827 struct hpp_dynamic_entry *hde, *new_hde; 2828 2829 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2830 new_hde = memdup(hde, sizeof(*hde)); 2831 if (new_hde) 2832 new_fmt = &new_hde->hpp; 2833 #endif 2834 } else { 2835 new_fmt = memdup(fmt, sizeof(*fmt)); 2836 } 2837 2838 INIT_LIST_HEAD(&new_fmt->list); 2839 INIT_LIST_HEAD(&new_fmt->sort_list); 2840 2841 return new_fmt; 2842 } 2843 2844 static int parse_field_name(char *str, char **event, char **field, char **opt) 2845 { 2846 char *event_name, *field_name, *opt_name; 2847 2848 event_name = str; 2849 field_name = strchr(str, '.'); 2850 2851 if (field_name) { 2852 *field_name++ = '\0'; 2853 } else { 2854 event_name = NULL; 2855 field_name = str; 2856 } 2857 2858 opt_name = strchr(field_name, '/'); 2859 if (opt_name) 2860 *opt_name++ = '\0'; 2861 2862 *event = event_name; 2863 *field = field_name; 2864 *opt = opt_name; 2865 2866 return 0; 2867 } 2868 2869 /* find match evsel using a given event name. The event name can be: 2870 * 1. '%' + event index (e.g. '%1' for first event) 2871 * 2. full event name (e.g. sched:sched_switch) 2872 * 3. partial event name (should not contain ':') 2873 */ 2874 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 2875 { 2876 struct evsel *evsel = NULL; 2877 struct evsel *pos; 2878 bool full_name; 2879 2880 /* case 1 */ 2881 if (event_name[0] == '%') { 2882 int nr = strtol(event_name+1, NULL, 0); 2883 2884 if (nr > evlist->core.nr_entries) 2885 return NULL; 2886 2887 evsel = evlist__first(evlist); 2888 while (--nr > 0) 2889 evsel = evsel__next(evsel); 2890 2891 return evsel; 2892 } 2893 2894 full_name = !!strchr(event_name, ':'); 2895 evlist__for_each_entry(evlist, pos) { 2896 /* case 2 */ 2897 if (full_name && !strcmp(pos->name, event_name)) 2898 return pos; 2899 /* case 3 */ 2900 if (!full_name && strstr(pos->name, event_name)) { 2901 if (evsel) { 2902 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2903 event_name, evsel->name, pos->name); 2904 return NULL; 2905 } 2906 evsel = pos; 2907 } 2908 } 2909 2910 return evsel; 2911 } 2912 2913 #ifdef HAVE_LIBTRACEEVENT 2914 static int __dynamic_dimension__add(struct evsel *evsel, 2915 struct tep_format_field *field, 2916 bool raw_trace, int level) 2917 { 2918 struct hpp_dynamic_entry *hde; 2919 2920 hde = __alloc_dynamic_entry(evsel, field, level); 2921 if (hde == NULL) 2922 return -ENOMEM; 2923 2924 hde->raw_trace = raw_trace; 2925 2926 perf_hpp__register_sort_field(&hde->hpp); 2927 return 0; 2928 } 2929 2930 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 2931 { 2932 int ret; 2933 struct tep_format_field *field; 2934 2935 field = evsel->tp_format->format.fields; 2936 while (field) { 2937 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2938 if (ret < 0) 2939 return ret; 2940 2941 field = field->next; 2942 } 2943 return 0; 2944 } 2945 2946 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 2947 int level) 2948 { 2949 int ret; 2950 struct evsel *evsel; 2951 2952 evlist__for_each_entry(evlist, evsel) { 2953 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2954 continue; 2955 2956 ret = add_evsel_fields(evsel, raw_trace, level); 2957 if (ret < 0) 2958 return ret; 2959 } 2960 return 0; 2961 } 2962 2963 static int add_all_matching_fields(struct evlist *evlist, 2964 char *field_name, bool raw_trace, int level) 2965 { 2966 int ret = -ESRCH; 2967 struct evsel *evsel; 2968 struct tep_format_field *field; 2969 2970 evlist__for_each_entry(evlist, evsel) { 2971 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2972 continue; 2973 2974 field = tep_find_any_field(evsel->tp_format, field_name); 2975 if (field == NULL) 2976 continue; 2977 2978 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2979 if (ret < 0) 2980 break; 2981 } 2982 return ret; 2983 } 2984 #endif /* HAVE_LIBTRACEEVENT */ 2985 2986 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 2987 int level) 2988 { 2989 char *str, *event_name, *field_name, *opt_name; 2990 struct evsel *evsel; 2991 bool raw_trace = symbol_conf.raw_trace; 2992 int ret = 0; 2993 2994 if (evlist == NULL) 2995 return -ENOENT; 2996 2997 str = strdup(tok); 2998 if (str == NULL) 2999 return -ENOMEM; 3000 3001 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 3002 ret = -EINVAL; 3003 goto out; 3004 } 3005 3006 if (opt_name) { 3007 if (strcmp(opt_name, "raw")) { 3008 pr_debug("unsupported field option %s\n", opt_name); 3009 ret = -EINVAL; 3010 goto out; 3011 } 3012 raw_trace = true; 3013 } 3014 3015 #ifdef HAVE_LIBTRACEEVENT 3016 if (!strcmp(field_name, "trace_fields")) { 3017 ret = add_all_dynamic_fields(evlist, raw_trace, level); 3018 goto out; 3019 } 3020 3021 if (event_name == NULL) { 3022 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 3023 goto out; 3024 } 3025 #else 3026 evlist__for_each_entry(evlist, evsel) { 3027 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 3028 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel)); 3029 ret = -ENOTSUP; 3030 } 3031 } 3032 3033 if (ret) { 3034 pr_err("\n"); 3035 goto out; 3036 } 3037 #endif 3038 3039 evsel = find_evsel(evlist, event_name); 3040 if (evsel == NULL) { 3041 pr_debug("Cannot find event: %s\n", event_name); 3042 ret = -ENOENT; 3043 goto out; 3044 } 3045 3046 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3047 pr_debug("%s is not a tracepoint event\n", event_name); 3048 ret = -EINVAL; 3049 goto out; 3050 } 3051 3052 #ifdef HAVE_LIBTRACEEVENT 3053 if (!strcmp(field_name, "*")) { 3054 ret = add_evsel_fields(evsel, raw_trace, level); 3055 } else { 3056 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name); 3057 3058 if (field == NULL) { 3059 pr_debug("Cannot find event field for %s.%s\n", 3060 event_name, field_name); 3061 return -ENOENT; 3062 } 3063 3064 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3065 } 3066 #else 3067 (void)level; 3068 (void)raw_trace; 3069 #endif /* HAVE_LIBTRACEEVENT */ 3070 3071 out: 3072 free(str); 3073 return ret; 3074 } 3075 3076 static int __sort_dimension__add(struct sort_dimension *sd, 3077 struct perf_hpp_list *list, 3078 int level) 3079 { 3080 if (sd->taken) 3081 return 0; 3082 3083 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 3084 return -1; 3085 3086 if (sd->entry->se_collapse) 3087 list->need_collapse = 1; 3088 3089 sd->taken = 1; 3090 3091 return 0; 3092 } 3093 3094 static int __hpp_dimension__add(struct hpp_dimension *hd, 3095 struct perf_hpp_list *list, 3096 int level) 3097 { 3098 struct perf_hpp_fmt *fmt; 3099 3100 if (hd->taken) 3101 return 0; 3102 3103 fmt = __hpp_dimension__alloc_hpp(hd, level); 3104 if (!fmt) 3105 return -1; 3106 3107 hd->taken = 1; 3108 perf_hpp_list__register_sort_field(list, fmt); 3109 return 0; 3110 } 3111 3112 static int __sort_dimension__add_output(struct perf_hpp_list *list, 3113 struct sort_dimension *sd) 3114 { 3115 if (sd->taken) 3116 return 0; 3117 3118 if (__sort_dimension__add_hpp_output(sd, list) < 0) 3119 return -1; 3120 3121 sd->taken = 1; 3122 return 0; 3123 } 3124 3125 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 3126 struct hpp_dimension *hd) 3127 { 3128 struct perf_hpp_fmt *fmt; 3129 3130 if (hd->taken) 3131 return 0; 3132 3133 fmt = __hpp_dimension__alloc_hpp(hd, 0); 3134 if (!fmt) 3135 return -1; 3136 3137 hd->taken = 1; 3138 perf_hpp_list__column_register(list, fmt); 3139 return 0; 3140 } 3141 3142 int hpp_dimension__add_output(unsigned col) 3143 { 3144 BUG_ON(col >= PERF_HPP__MAX_INDEX); 3145 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 3146 } 3147 3148 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 3149 struct evlist *evlist, 3150 int level) 3151 { 3152 unsigned int i, j; 3153 3154 /* 3155 * Check to see if there are any arch specific 3156 * sort dimensions not applicable for the current 3157 * architecture. If so, Skip that sort key since 3158 * we don't want to display it in the output fields. 3159 */ 3160 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) { 3161 if (!strcmp(arch_specific_sort_keys[j], tok) && 3162 !arch_support_sort_key(tok)) { 3163 return 0; 3164 } 3165 } 3166 3167 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3168 struct sort_dimension *sd = &common_sort_dimensions[i]; 3169 3170 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3171 continue; 3172 3173 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) { 3174 if (sd->name && !strcmp(dynamic_headers[j], sd->name)) 3175 sort_dimension_add_dynamic_header(sd); 3176 } 3177 3178 if (sd->entry == &sort_parent) { 3179 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 3180 if (ret) { 3181 char err[BUFSIZ]; 3182 3183 regerror(ret, &parent_regex, err, sizeof(err)); 3184 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 3185 return -EINVAL; 3186 } 3187 list->parent = 1; 3188 } else if (sd->entry == &sort_sym) { 3189 list->sym = 1; 3190 /* 3191 * perf diff displays the performance difference amongst 3192 * two or more perf.data files. Those files could come 3193 * from different binaries. So we should not compare 3194 * their ips, but the name of symbol. 3195 */ 3196 if (sort__mode == SORT_MODE__DIFF) 3197 sd->entry->se_collapse = sort__sym_sort; 3198 3199 } else if (sd->entry == &sort_dso) { 3200 list->dso = 1; 3201 } else if (sd->entry == &sort_socket) { 3202 list->socket = 1; 3203 } else if (sd->entry == &sort_thread) { 3204 list->thread = 1; 3205 } else if (sd->entry == &sort_comm) { 3206 list->comm = 1; 3207 } 3208 3209 return __sort_dimension__add(sd, list, level); 3210 } 3211 3212 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3213 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3214 3215 if (strncasecmp(tok, hd->name, strlen(tok))) 3216 continue; 3217 3218 return __hpp_dimension__add(hd, list, level); 3219 } 3220 3221 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3222 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3223 3224 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3225 continue; 3226 3227 if (sort__mode != SORT_MODE__BRANCH) 3228 return -EINVAL; 3229 3230 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 3231 list->sym = 1; 3232 3233 __sort_dimension__add(sd, list, level); 3234 return 0; 3235 } 3236 3237 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3238 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3239 3240 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3241 continue; 3242 3243 if (sort__mode != SORT_MODE__MEMORY) 3244 return -EINVAL; 3245 3246 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 3247 return -EINVAL; 3248 3249 if (sd->entry == &sort_mem_daddr_sym) 3250 list->sym = 1; 3251 3252 __sort_dimension__add(sd, list, level); 3253 return 0; 3254 } 3255 3256 if (!add_dynamic_entry(evlist, tok, level)) 3257 return 0; 3258 3259 return -ESRCH; 3260 } 3261 3262 static int setup_sort_list(struct perf_hpp_list *list, char *str, 3263 struct evlist *evlist) 3264 { 3265 char *tmp, *tok; 3266 int ret = 0; 3267 int level = 0; 3268 int next_level = 1; 3269 bool in_group = false; 3270 3271 do { 3272 tok = str; 3273 tmp = strpbrk(str, "{}, "); 3274 if (tmp) { 3275 if (in_group) 3276 next_level = level; 3277 else 3278 next_level = level + 1; 3279 3280 if (*tmp == '{') 3281 in_group = true; 3282 else if (*tmp == '}') 3283 in_group = false; 3284 3285 *tmp = '\0'; 3286 str = tmp + 1; 3287 } 3288 3289 if (*tok) { 3290 ret = sort_dimension__add(list, tok, evlist, level); 3291 if (ret == -EINVAL) { 3292 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 3293 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 3294 else 3295 ui__error("Invalid --sort key: `%s'", tok); 3296 break; 3297 } else if (ret == -ESRCH) { 3298 ui__error("Unknown --sort key: `%s'", tok); 3299 break; 3300 } 3301 } 3302 3303 level = next_level; 3304 } while (tmp); 3305 3306 return ret; 3307 } 3308 3309 static const char *get_default_sort_order(struct evlist *evlist) 3310 { 3311 const char *default_sort_orders[] = { 3312 default_sort_order, 3313 default_branch_sort_order, 3314 default_mem_sort_order, 3315 default_top_sort_order, 3316 default_diff_sort_order, 3317 default_tracepoint_sort_order, 3318 }; 3319 bool use_trace = true; 3320 struct evsel *evsel; 3321 3322 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 3323 3324 if (evlist == NULL || evlist__empty(evlist)) 3325 goto out_no_evlist; 3326 3327 evlist__for_each_entry(evlist, evsel) { 3328 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3329 use_trace = false; 3330 break; 3331 } 3332 } 3333 3334 if (use_trace) { 3335 sort__mode = SORT_MODE__TRACEPOINT; 3336 if (symbol_conf.raw_trace) 3337 return "trace_fields"; 3338 } 3339 out_no_evlist: 3340 return default_sort_orders[sort__mode]; 3341 } 3342 3343 static int setup_sort_order(struct evlist *evlist) 3344 { 3345 char *new_sort_order; 3346 3347 /* 3348 * Append '+'-prefixed sort order to the default sort 3349 * order string. 3350 */ 3351 if (!sort_order || is_strict_order(sort_order)) 3352 return 0; 3353 3354 if (sort_order[1] == '\0') { 3355 ui__error("Invalid --sort key: `+'"); 3356 return -EINVAL; 3357 } 3358 3359 /* 3360 * We allocate new sort_order string, but we never free it, 3361 * because it's checked over the rest of the code. 3362 */ 3363 if (asprintf(&new_sort_order, "%s,%s", 3364 get_default_sort_order(evlist), sort_order + 1) < 0) { 3365 pr_err("Not enough memory to set up --sort"); 3366 return -ENOMEM; 3367 } 3368 3369 sort_order = new_sort_order; 3370 return 0; 3371 } 3372 3373 /* 3374 * Adds 'pre,' prefix into 'str' is 'pre' is 3375 * not already part of 'str'. 3376 */ 3377 static char *prefix_if_not_in(const char *pre, char *str) 3378 { 3379 char *n; 3380 3381 if (!str || strstr(str, pre)) 3382 return str; 3383 3384 if (asprintf(&n, "%s,%s", pre, str) < 0) 3385 n = NULL; 3386 3387 free(str); 3388 return n; 3389 } 3390 3391 static char *setup_overhead(char *keys) 3392 { 3393 if (sort__mode == SORT_MODE__DIFF) 3394 return keys; 3395 3396 keys = prefix_if_not_in("overhead", keys); 3397 3398 if (symbol_conf.cumulate_callchain) 3399 keys = prefix_if_not_in("overhead_children", keys); 3400 3401 return keys; 3402 } 3403 3404 static int __setup_sorting(struct evlist *evlist) 3405 { 3406 char *str; 3407 const char *sort_keys; 3408 int ret = 0; 3409 3410 ret = setup_sort_order(evlist); 3411 if (ret) 3412 return ret; 3413 3414 sort_keys = sort_order; 3415 if (sort_keys == NULL) { 3416 if (is_strict_order(field_order)) { 3417 /* 3418 * If user specified field order but no sort order, 3419 * we'll honor it and not add default sort orders. 3420 */ 3421 return 0; 3422 } 3423 3424 sort_keys = get_default_sort_order(evlist); 3425 } 3426 3427 str = strdup(sort_keys); 3428 if (str == NULL) { 3429 pr_err("Not enough memory to setup sort keys"); 3430 return -ENOMEM; 3431 } 3432 3433 /* 3434 * Prepend overhead fields for backward compatibility. 3435 */ 3436 if (!is_strict_order(field_order)) { 3437 str = setup_overhead(str); 3438 if (str == NULL) { 3439 pr_err("Not enough memory to setup overhead keys"); 3440 return -ENOMEM; 3441 } 3442 } 3443 3444 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3445 3446 free(str); 3447 return ret; 3448 } 3449 3450 void perf_hpp__set_elide(int idx, bool elide) 3451 { 3452 struct perf_hpp_fmt *fmt; 3453 struct hpp_sort_entry *hse; 3454 3455 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3456 if (!perf_hpp__is_sort_entry(fmt)) 3457 continue; 3458 3459 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3460 if (hse->se->se_width_idx == idx) { 3461 fmt->elide = elide; 3462 break; 3463 } 3464 } 3465 } 3466 3467 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3468 { 3469 if (list && strlist__nr_entries(list) == 1) { 3470 if (fp != NULL) 3471 fprintf(fp, "# %s: %s\n", list_name, 3472 strlist__entry(list, 0)->s); 3473 return true; 3474 } 3475 return false; 3476 } 3477 3478 static bool get_elide(int idx, FILE *output) 3479 { 3480 switch (idx) { 3481 case HISTC_SYMBOL: 3482 return __get_elide(symbol_conf.sym_list, "symbol", output); 3483 case HISTC_DSO: 3484 return __get_elide(symbol_conf.dso_list, "dso", output); 3485 case HISTC_COMM: 3486 return __get_elide(symbol_conf.comm_list, "comm", output); 3487 default: 3488 break; 3489 } 3490 3491 if (sort__mode != SORT_MODE__BRANCH) 3492 return false; 3493 3494 switch (idx) { 3495 case HISTC_SYMBOL_FROM: 3496 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3497 case HISTC_SYMBOL_TO: 3498 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3499 case HISTC_DSO_FROM: 3500 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3501 case HISTC_DSO_TO: 3502 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3503 case HISTC_ADDR_FROM: 3504 return __get_elide(symbol_conf.sym_from_list, "addr_from", output); 3505 case HISTC_ADDR_TO: 3506 return __get_elide(symbol_conf.sym_to_list, "addr_to", output); 3507 default: 3508 break; 3509 } 3510 3511 return false; 3512 } 3513 3514 void sort__setup_elide(FILE *output) 3515 { 3516 struct perf_hpp_fmt *fmt; 3517 struct hpp_sort_entry *hse; 3518 3519 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3520 if (!perf_hpp__is_sort_entry(fmt)) 3521 continue; 3522 3523 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3524 fmt->elide = get_elide(hse->se->se_width_idx, output); 3525 } 3526 3527 /* 3528 * It makes no sense to elide all of sort entries. 3529 * Just revert them to show up again. 3530 */ 3531 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3532 if (!perf_hpp__is_sort_entry(fmt)) 3533 continue; 3534 3535 if (!fmt->elide) 3536 return; 3537 } 3538 3539 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3540 if (!perf_hpp__is_sort_entry(fmt)) 3541 continue; 3542 3543 fmt->elide = false; 3544 } 3545 } 3546 3547 int output_field_add(struct perf_hpp_list *list, char *tok) 3548 { 3549 unsigned int i; 3550 3551 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3552 struct sort_dimension *sd = &common_sort_dimensions[i]; 3553 3554 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3555 continue; 3556 3557 return __sort_dimension__add_output(list, sd); 3558 } 3559 3560 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3561 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3562 3563 if (strncasecmp(tok, hd->name, strlen(tok))) 3564 continue; 3565 3566 return __hpp_dimension__add_output(list, hd); 3567 } 3568 3569 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3570 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3571 3572 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3573 continue; 3574 3575 if (sort__mode != SORT_MODE__BRANCH) 3576 return -EINVAL; 3577 3578 return __sort_dimension__add_output(list, sd); 3579 } 3580 3581 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3582 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3583 3584 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3585 continue; 3586 3587 if (sort__mode != SORT_MODE__MEMORY) 3588 return -EINVAL; 3589 3590 return __sort_dimension__add_output(list, sd); 3591 } 3592 3593 return -ESRCH; 3594 } 3595 3596 static int setup_output_list(struct perf_hpp_list *list, char *str) 3597 { 3598 char *tmp, *tok; 3599 int ret = 0; 3600 3601 for (tok = strtok_r(str, ", ", &tmp); 3602 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3603 ret = output_field_add(list, tok); 3604 if (ret == -EINVAL) { 3605 ui__error("Invalid --fields key: `%s'", tok); 3606 break; 3607 } else if (ret == -ESRCH) { 3608 ui__error("Unknown --fields key: `%s'", tok); 3609 break; 3610 } 3611 } 3612 3613 return ret; 3614 } 3615 3616 void reset_dimensions(void) 3617 { 3618 unsigned int i; 3619 3620 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3621 common_sort_dimensions[i].taken = 0; 3622 3623 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3624 hpp_sort_dimensions[i].taken = 0; 3625 3626 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3627 bstack_sort_dimensions[i].taken = 0; 3628 3629 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3630 memory_sort_dimensions[i].taken = 0; 3631 } 3632 3633 bool is_strict_order(const char *order) 3634 { 3635 return order && (*order != '+'); 3636 } 3637 3638 static int __setup_output_field(void) 3639 { 3640 char *str, *strp; 3641 int ret = -EINVAL; 3642 3643 if (field_order == NULL) 3644 return 0; 3645 3646 strp = str = strdup(field_order); 3647 if (str == NULL) { 3648 pr_err("Not enough memory to setup output fields"); 3649 return -ENOMEM; 3650 } 3651 3652 if (!is_strict_order(field_order)) 3653 strp++; 3654 3655 if (!strlen(strp)) { 3656 ui__error("Invalid --fields key: `+'"); 3657 goto out; 3658 } 3659 3660 ret = setup_output_list(&perf_hpp_list, strp); 3661 3662 out: 3663 free(str); 3664 return ret; 3665 } 3666 3667 int setup_sorting(struct evlist *evlist) 3668 { 3669 int err; 3670 3671 err = __setup_sorting(evlist); 3672 if (err < 0) 3673 return err; 3674 3675 if (parent_pattern != default_parent_pattern) { 3676 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3677 if (err < 0) 3678 return err; 3679 } 3680 3681 reset_dimensions(); 3682 3683 /* 3684 * perf diff doesn't use default hpp output fields. 3685 */ 3686 if (sort__mode != SORT_MODE__DIFF) 3687 perf_hpp__init(); 3688 3689 err = __setup_output_field(); 3690 if (err < 0) 3691 return err; 3692 3693 /* copy sort keys to output fields */ 3694 perf_hpp__setup_output_field(&perf_hpp_list); 3695 /* and then copy output fields to sort keys */ 3696 perf_hpp__append_sort_keys(&perf_hpp_list); 3697 3698 /* setup hists-specific output fields */ 3699 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3700 return -1; 3701 3702 return 0; 3703 } 3704 3705 void reset_output_field(void) 3706 { 3707 perf_hpp_list.need_collapse = 0; 3708 perf_hpp_list.parent = 0; 3709 perf_hpp_list.sym = 0; 3710 perf_hpp_list.dso = 0; 3711 3712 field_order = NULL; 3713 sort_order = NULL; 3714 3715 reset_dimensions(); 3716 perf_hpp__reset_output_field(&perf_hpp_list); 3717 } 3718 3719 #define INDENT (3*8 + 1) 3720 3721 static void add_key(struct strbuf *sb, const char *str, int *llen) 3722 { 3723 if (!str) 3724 return; 3725 3726 if (*llen >= 75) { 3727 strbuf_addstr(sb, "\n\t\t\t "); 3728 *llen = INDENT; 3729 } 3730 strbuf_addf(sb, " %s", str); 3731 *llen += strlen(str) + 1; 3732 } 3733 3734 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3735 int *llen) 3736 { 3737 int i; 3738 3739 for (i = 0; i < n; i++) 3740 add_key(sb, s[i].name, llen); 3741 } 3742 3743 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 3744 int *llen) 3745 { 3746 int i; 3747 3748 for (i = 0; i < n; i++) 3749 add_key(sb, s[i].name, llen); 3750 } 3751 3752 char *sort_help(const char *prefix) 3753 { 3754 struct strbuf sb; 3755 char *s; 3756 int len = strlen(prefix) + INDENT; 3757 3758 strbuf_init(&sb, 300); 3759 strbuf_addstr(&sb, prefix); 3760 add_hpp_sort_string(&sb, hpp_sort_dimensions, 3761 ARRAY_SIZE(hpp_sort_dimensions), &len); 3762 add_sort_string(&sb, common_sort_dimensions, 3763 ARRAY_SIZE(common_sort_dimensions), &len); 3764 add_sort_string(&sb, bstack_sort_dimensions, 3765 ARRAY_SIZE(bstack_sort_dimensions), &len); 3766 add_sort_string(&sb, memory_sort_dimensions, 3767 ARRAY_SIZE(memory_sort_dimensions), &len); 3768 s = strbuf_detach(&sb, NULL); 3769 strbuf_release(&sb); 3770 return s; 3771 } 3772