// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
#include <linux/kernel.h>
#include <linux/string.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character selected with the:
 *
 *   -t, --field-separator
 *
 * option.  That option uses a special separator character and does not pad
 * with spaces, so every occurrence of the separator in symbol names (and
 * other output) is replaced with a '.' character, making the separator the
 * only character that cannot appear inside a field.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ?
			map->dso->long_name : map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ?
			dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};
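
/*
 * Like sort_srcline above, the srcline_from/srcline_to keys resolve an
 * address to a "file:line" string, but they operate on the source and
 * target of a sampled branch (he->branch_info), so they are only
 * meaningful for data recorded with branch stacks (e.g. LBR).
 */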

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;
	struct annotation *notes;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	notes = symbol__annotation(sym);

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			((double)notes->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}
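
/*
 * Timestamps are printed with microsecond resolution by default; when
 * symbol_conf.nanosecs is set the full nanosecond value is kept.
 */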
static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       he->branch_info->from.ms.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       he->branch_info->to.ms.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->addr, he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->addr, he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		ms = &he->mem_info->daddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}
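
/* Print the instruction address of the memory access, resolved to a symbol when one is available. */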
static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		ms = &he->mem_info->iaddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.ms.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.ms.map;
	r_map = right->mem_info->daddr.ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = he->mem_info->daddr.ms.map;

		addr = cl_address(he->mem_info->daddr.al_addr);
		ms = &he->mem_info->daddr.ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}
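
/* "T" when the branch executed inside a transaction, "." when it did not, "N/A" without branch info. */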
static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
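
/*
 * The cmp/collapse/sort wrappers below just forward to the wrapped
 * sort_entry, falling back to ->se_cmp when no specialized ->se_collapse
 * or ->se_sort callback is provided.
 */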
static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied. But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}
static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	if (new_fmt == NULL)
		return NULL;

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}
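/*
 * Illustrative example: for the token "sched:sched_switch.next_pid/raw",
 * parse_field_name() above yields event "sched:sched_switch", field
 * "next_pid" and option "raw"; a token without a '.' leaves the event name
 * NULL, which later means "look the field up in every tracepoint event".
 */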
/*
 * Find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name + 1, NULL, 0);

		if (nr <= 0 || nr > evlist->core.nr_entries)
			return NULL;

		evsel = evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
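/*
 * A dynamic sort key names a tracepoint field.  The forms handled below are
 * "<field>", "<event>.<field>", "<event>.*" (all fields of one event) and
 * the special key "trace_fields" (all fields of every tracepoint event);
 * appending "/raw" disables the pretty-printed output for that key.
 */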
static int add_dynamic_entry(struct evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct evsel *evsel;
	struct tep_format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
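/*
 * Map a single --sort token onto a dimension: the common, hpp, branch-stack
 * and memory tables are tried in that order, and anything left over is
 * treated as a dynamic tracepoint-field key.
 */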
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
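/*
 * The default sort order is picked by sort__mode, which indexes the
 * default_sort_orders[] table below; when every event in the evlist is a
 * tracepoint, the mode is switched to SORT_MODE__TRACEPOINT so the "trace"
 * key (or "trace_fields" when symbol_conf.raw_trace is set) is used.
 */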
static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it is referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
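/*
 * For example, with the overhead prefixing above a plain "comm,dso" sort
 * string becomes "overhead,comm,dso", and "overhead_children,overhead,comm,dso"
 * when cumulative callchains (--children) are enabled, unless the user
 * already listed those keys explicitly.
 */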
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, honor it and do not add the default sort
			 * orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
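/*
 * A column is elided when its filter list pins it to a single value (for
 * example a --dsos filter with one entry): every row would repeat the same
 * string, so the value is printed once as a "# <name>: <value>" header line
 * and the column itself is hidden.
 */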
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
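/*
 * setup_sorting() is the external entry point: it builds the sort list from
 * the --sort keys, the output list from the --fields keys, and then links
 * the two so every sort key gets an output column and every output field
 * also takes part in sorting.
 */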
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

const char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}