// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
#include <linux/kernel.h>
#include <linux/string.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character used with the:
 *
 *   -t, --field-separator
 *
 * option, which selects a special separator character and disables padding
 * with spaces.  Every occurrence of that separator in symbol names (and
 * other output) is replaced with a '.' character, so the separator can only
 * ever mean "next column" in the output.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}
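
/*
 * Example (illustrative): with '-t :' on the command line, a symbol name
 * such as "std::vector" would be emitted as "std..vector", so that ':' can
 * only ever appear in the output as the column separator itself.
 */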

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};
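
/*
 * Note: a sort_entry can provide up to three comparators: ->se_cmp is used
 * when entries are first added/grouped, ->se_collapse during the collapse
 * pass and ->se_sort for the final output ordering.  __sort__hpp_collapse()
 * and __sort__hpp_sort() below fall back to ->se_cmp when the more specific
 * callbacks are not set, which is why most keys only define ->se_cmp.
 */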

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->map, ams->al_addr, ams->sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header	= "From Source:Line",
	.se_cmp		= sort__srcline_from_cmp,
	.se_snprintf	= hist_entry__srcline_from_snprintf,
	.se_width_idx	= HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header	= "To Source:Line",
	.se_cmp		= sort__srcline_to_cmp,
	.se_snprintf	= hist_entry__srcline_to_snprintf,
	.se_width_idx	= HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{

	struct symbol *sym = he->ms.sym;
	struct annotation *notes;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	notes = symbol__annotation(sym);

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			((double)notes->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header	= "IPC [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header	= "IPC [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
};
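
/*
 * Illustration: with 300 instructions retired over 100 sampled cycles and
 * 250 of the symbol's 500 instructions covered by samples, the column above
 * would read roughly "3.00  [ 50.0%]".  The counters come from the per-symbol
 * annotation data (see symbol__annotation()).
 */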

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header	= "CPU",
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header	= "cgroup id (dev/inode)",
	.se_cmp		= sort__cgroup_id_cmp,
	.se_snprintf	= hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header	= "Time",
	.se_cmp		= sort__time_cmp,
	.se_snprintf	= hist_entry__time_snprintf,
	.se_width_idx	= HISTC_TIME,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};
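
/*
 * For reference: for a tracepoint such as sched:sched_switch the pretty
 * printed trace_output looks roughly like
 * "prev_comm=... prev_pid=... ==> next_comm=... next_pid=...", while raw
 * mode (symbol_conf.raw_trace) dumps the individual record fields through
 * tep_print_fields() instead.
 */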

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};
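
/*
 * Note: flags.cycles is the cycle count the branch-stack hardware (e.g. LBRs
 * on recent Intel parts) reports for the basic block ending at this branch;
 * 0 means the information was not available, hence the "-" printed above.
 */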

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
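
/*
 * Note: mem_lock, mem_dtlb, mem_lvl and mem_snoop used by the cmp routines
 * here are bit-fields of union perf_mem_data_src, i.e. the decoded
 * PERF_SAMPLE_DATA_SRC word of a memory access sample (perf mem / perf c2c);
 * the *_NA values make samples without that information group together.
 */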

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
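
/*
 * Illustration: cl_address() masks off the intra-line offset, so with the
 * usual 64-byte cache lines the addresses 0x1000 through 0x103f all compare
 * equal and end up in the same "Data Cacheline" bucket (0x1000).
 */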

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};
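
/*
 * Note: "local_weight" is the average weight per sample in the entry
 * (stat.weight / stat.nr_events) while "weight" is the accumulated total;
 * for memory load events the weight is typically the measured access
 * latency in cycles.
 */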

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header	= "Data Physical Address",
	.se_cmp		= sort__phys_daddr_cmp,
	.se_snprintf	= hist_entry__phys_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};
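
/*
 * Illustration: a sample taken in a transaction that aborted asynchronously
 * on a conflict with abort code 1 would be rendered roughly as
 * "TX ASYNC CON :1" (flag names in txbits[] order, then the abort code).
 */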

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};


struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
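
/*
 * Note: perf_hpp__is_sort_entry() recognizes the wrappers built by
 * __sort_dimension__alloc_hpp() purely by their ->header callback, and
 * perf_hpp__is_dynamic_entry() below plays the same trick with ->cmp, so no
 * extra type tag is needed in struct perf_hpp_fmt.
 */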

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * The hist entry is filtered out if any sort key in the hpp
		 * list filters it, but filter types that a given sort key
		 * does not handle are skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}
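
/*
 * Note on the se_filter() convention used above: a callback returns -1 when
 * the filter type is not one it handles, 0 when the entry matches the filter
 * (keep it) and non-zero when it does not, so after OR-ing the applicable
 * results an entry survives only if every applicable sort key keeps it.
 */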
static int hde_width(struct hpp_dynamic_entry *hde)
{
        if (!hde->hpp.len) {
                int len = hde->dynamic_len;
                int namelen = strlen(hde->field->name);
                int fieldlen = hde->field->size;

                if (namelen > len)
                        len = namelen;

                if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
                        /* length for printing hex numbers */
                        fieldlen = hde->field->size * 2 + 2;
                }
                if (fieldlen > len)
                        len = fieldlen;

                hde->hpp.len = len;
        }
        return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
                               struct hist_entry *he)
{
        char *str, *pos;
        struct tep_format_field *field = hde->field;
        size_t namelen;
        bool last = false;

        if (hde->raw_trace)
                return;

        /* parse pretty print result and update max length */
        if (!he->trace_output)
                he->trace_output = get_trace_output(he);

        namelen = strlen(field->name);
        str = he->trace_output;

        while (str) {
                pos = strchr(str, ' ');
                if (pos == NULL) {
                        last = true;
                        pos = str + strlen(str);
                }

                if (!strncmp(str, field->name, namelen)) {
                        size_t len;

                        str += namelen + 1;
                        len = pos - str;

                        if (len > hde->dynamic_len)
                                hde->dynamic_len = len;
                        break;
                }

                if (last)
                        str = NULL;
                else
                        str = pos + 1;
        }
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                              struct hists *hists __maybe_unused,
                              int line __maybe_unused,
                              int *span __maybe_unused)
{
        struct hpp_dynamic_entry *hde;
        size_t len = fmt->user_len;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        if (!len)
                len = hde_width(hde);

        return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
                             struct perf_hpp *hpp __maybe_unused,
                             struct hists *hists __maybe_unused)
{
        struct hpp_dynamic_entry *hde;
        size_t len = fmt->user_len;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        if (!len)
                len = hde_width(hde);

        return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
        struct hpp_dynamic_entry *hde;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                             struct hist_entry *he)
{
        struct hpp_dynamic_entry *hde;
        size_t len = fmt->user_len;
        char *str, *pos;
        struct tep_format_field *field;
        size_t namelen;
        bool last = false;
        int ret;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        if (!len)
                len = hde_width(hde);

        if (hde->raw_trace)
                goto raw_field;

        if (!he->trace_output)
                he->trace_output = get_trace_output(he);

        field = hde->field;
        namelen = strlen(field->name);
        str = he->trace_output;

        while (str) {
                pos = strchr(str, ' ');
                if (pos == NULL) {
                        last = true;
                        pos = str + strlen(str);
                }

                if (!strncmp(str, field->name, namelen)) {
                        str += namelen + 1;
                        str = strndup(str, pos - str);

                        if (str == NULL)
                                return scnprintf(hpp->buf, hpp->size,
                                                 "%*.*s", len, len, "ERROR");
                        break;
                }

                if (last)
                        str = NULL;
                else
                        str = pos + 1;
        }

        if (str == NULL) {
                struct trace_seq seq;
raw_field:
                trace_seq_init(&seq);
                tep_print_field(&seq, he->raw_data, hde->field);
                str = seq.buffer;
        }

        ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
        free(str);
        return ret;
}
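
/*
 * Compare two hist entries by the raw value of the trace event field.
 * A NULL @b is a width-update request: only the pretty-printed length of
 * @a is recorded.  For TEP_FIELD_IS_DYNAMIC fields the on-record
 * descriptor packs the payload offset in the low 16 bits and its size in
 * the high 16 bits.
 */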
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
                               struct hist_entry *a, struct hist_entry *b)
{
        struct hpp_dynamic_entry *hde;
        struct tep_format_field *field;
        unsigned offset, size;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        if (b == NULL) {
                update_dynamic_len(hde, a);
                return 0;
        }

        field = hde->field;
        if (field->flags & TEP_FIELD_IS_DYNAMIC) {
                unsigned long long dyn;

                tep_read_number_field(field, a->raw_data, &dyn);
                offset = dyn & 0xffff;
                size = (dyn >> 16) & 0xffff;

                /* record max width for output */
                if (size > hde->dynamic_len)
                        hde->dynamic_len = size;
        } else {
                offset = field->offset;
                size = field->size;
        }

        return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
        return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
        struct hpp_dynamic_entry *hde_a;
        struct hpp_dynamic_entry *hde_b;

        if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
                return false;

        hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
        hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

        return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
        struct hpp_dynamic_entry *hde;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
        free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
                      int level)
{
        struct hpp_dynamic_entry *hde;

        hde = malloc(sizeof(*hde));
        if (hde == NULL) {
                pr_debug("Memory allocation failed\n");
                return NULL;
        }

        hde->evsel = evsel;
        hde->field = field;
        hde->dynamic_len = 0;

        hde->hpp.name = field->name;
        hde->hpp.header = __sort__hde_header;
        hde->hpp.width = __sort__hde_width;
        hde->hpp.entry = __sort__hde_entry;
        hde->hpp.color = NULL;

        hde->hpp.cmp = __sort__hde_cmp;
        hde->hpp.collapse = __sort__hde_cmp;
        hde->hpp.sort = __sort__hde_cmp;
        hde->hpp.equal = __sort__hde_equal;
        hde->hpp.free = hde_free;

        INIT_LIST_HEAD(&hde->hpp.list);
        INIT_LIST_HEAD(&hde->hpp.sort_list);
        hde->hpp.elide = false;
        hde->hpp.len = 0;
        hde->hpp.user_len = 0;
        hde->hpp.level = level;

        return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
        struct perf_hpp_fmt *new_fmt = NULL;

        if (perf_hpp__is_sort_entry(fmt)) {
                struct hpp_sort_entry *hse, *new_hse;

                hse = container_of(fmt, struct hpp_sort_entry, hpp);
                new_hse = memdup(hse, sizeof(*hse));
                if (new_hse)
                        new_fmt = &new_hse->hpp;
        } else if (perf_hpp__is_dynamic_entry(fmt)) {
                struct hpp_dynamic_entry *hde, *new_hde;

                hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
                new_hde = memdup(hde, sizeof(*hde));
                if (new_hde)
                        new_fmt = &new_hde->hpp;
        } else {
                new_fmt = memdup(fmt, sizeof(*fmt));
        }

        if (new_fmt) {
                INIT_LIST_HEAD(&new_fmt->list);
                INIT_LIST_HEAD(&new_fmt->sort_list);
        }

        return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
        char *event_name, *field_name, *opt_name;

        event_name = str;
        field_name = strchr(str, '.');

        if (field_name) {
                *field_name++ = '\0';
        } else {
                event_name = NULL;
                field_name = str;
        }

        opt_name = strchr(field_name, '/');
        if (opt_name)
                *opt_name++ = '\0';

        *event = event_name;
        *field = field_name;
        *opt = opt_name;

        return 0;
}
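
/*
 * A dynamic sort key names a tracepoint field, in the form parsed by
 * parse_field_name() above: [<event>.]<field>[/raw].  For example (the
 * event and field names below are only illustrations):
 *
 *   sched:sched_switch.prev_comm - one field of one event
 *   %2.next_pid                  - same, with the event given by index
 *   prev_state                   - this field in every tracepoint that has it
 *   sched:sched_switch.*         - every field of the event
 *   trace_fields                 - every field of every tracepoint event
 *
 * Appending "/raw" prints the field value without pretty printing.
 */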
/*
 * Find a matching evsel using the given event name.  The name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
        struct evsel *evsel = NULL;
        struct evsel *pos;
        bool full_name;

        /* case 1 */
        if (event_name[0] == '%') {
                int nr = strtol(event_name+1, NULL, 0);

                if (nr > evlist->core.nr_entries)
                        return NULL;

                evsel = evlist__first(evlist);
                while (--nr > 0)
                        evsel = perf_evsel__next(evsel);

                return evsel;
        }

        full_name = !!strchr(event_name, ':');
        evlist__for_each_entry(evlist, pos) {
                /* case 2 */
                if (full_name && !strcmp(pos->name, event_name))
                        return pos;
                /* case 3 */
                if (!full_name && strstr(pos->name, event_name)) {
                        if (evsel) {
                                pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
                                         event_name, evsel->name, pos->name);
                                return NULL;
                        }
                        evsel = pos;
                }
        }

        return evsel;
}

static int __dynamic_dimension__add(struct evsel *evsel,
                                    struct tep_format_field *field,
                                    bool raw_trace, int level)
{
        struct hpp_dynamic_entry *hde;

        hde = __alloc_dynamic_entry(evsel, field, level);
        if (hde == NULL)
                return -ENOMEM;

        hde->raw_trace = raw_trace;

        perf_hpp__register_sort_field(&hde->hpp);
        return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
        int ret;
        struct tep_format_field *field;

        field = evsel->tp_format->format.fields;
        while (field) {
                ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
                if (ret < 0)
                        return ret;

                field = field->next;
        }
        return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
                                  int level)
{
        int ret;
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
                        continue;

                ret = add_evsel_fields(evsel, raw_trace, level);
                if (ret < 0)
                        return ret;
        }
        return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
                                   char *field_name, bool raw_trace, int level)
{
        int ret = -ESRCH;
        struct evsel *evsel;
        struct tep_format_field *field;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
                        continue;

                field = tep_find_any_field(evsel->tp_format, field_name);
                if (field == NULL)
                        continue;

                ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
                if (ret < 0)
                        break;
        }
        return ret;
}
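
/*
 * Resolve one dynamic sort token and register the matching field(s) as
 * sort keys.  Returns -ENOENT when the event or field cannot be found,
 * -EINVAL for a malformed token or unsupported option, and -ESRCH (via
 * add_all_matching_fields()) when no tracepoint has the named field.
 */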
static int add_dynamic_entry(struct evlist *evlist, const char *tok,
                             int level)
{
        char *str, *event_name, *field_name, *opt_name;
        struct evsel *evsel;
        struct tep_format_field *field;
        bool raw_trace = symbol_conf.raw_trace;
        int ret = 0;

        if (evlist == NULL)
                return -ENOENT;

        str = strdup(tok);
        if (str == NULL)
                return -ENOMEM;

        if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
                ret = -EINVAL;
                goto out;
        }

        if (opt_name) {
                if (strcmp(opt_name, "raw")) {
                        pr_debug("unsupported field option %s\n", opt_name);
                        ret = -EINVAL;
                        goto out;
                }
                raw_trace = true;
        }

        if (!strcmp(field_name, "trace_fields")) {
                ret = add_all_dynamic_fields(evlist, raw_trace, level);
                goto out;
        }

        if (event_name == NULL) {
                ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
                goto out;
        }

        evsel = find_evsel(evlist, event_name);
        if (evsel == NULL) {
                pr_debug("Cannot find event: %s\n", event_name);
                ret = -ENOENT;
                goto out;
        }

        if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
                pr_debug("%s is not a tracepoint event\n", event_name);
                ret = -EINVAL;
                goto out;
        }

        if (!strcmp(field_name, "*")) {
                ret = add_evsel_fields(evsel, raw_trace, level);
        } else {
                field = tep_find_any_field(evsel->tp_format, field_name);
                if (field == NULL) {
                        pr_debug("Cannot find event field for %s.%s\n",
                                 event_name, field_name);
                        ret = -ENOENT;
                        goto out;
                }

                ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
        }

out:
        free(str);
        return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
                                 struct perf_hpp_list *list,
                                 int level)
{
        if (sd->taken)
                return 0;

        if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
                return -1;

        if (sd->entry->se_collapse)
                list->need_collapse = 1;

        sd->taken = 1;

        return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
                                struct perf_hpp_list *list,
                                int level)
{
        struct perf_hpp_fmt *fmt;

        if (hd->taken)
                return 0;

        fmt = __hpp_dimension__alloc_hpp(hd, level);
        if (!fmt)
                return -1;

        hd->taken = 1;
        perf_hpp_list__register_sort_field(list, fmt);
        return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
                                        struct sort_dimension *sd)
{
        if (sd->taken)
                return 0;

        if (__sort_dimension__add_hpp_output(sd, list) < 0)
                return -1;

        sd->taken = 1;
        return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
                                       struct hpp_dimension *hd)
{
        struct perf_hpp_fmt *fmt;

        if (hd->taken)
                return 0;

        fmt = __hpp_dimension__alloc_hpp(hd, 0);
        if (!fmt)
                return -1;

        hd->taken = 1;
        perf_hpp_list__column_register(list, fmt);
        return 0;
}

int hpp_dimension__add_output(unsigned col)
{
        BUG_ON(col >= PERF_HPP__MAX_INDEX);
        return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
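
/*
 * Look a --sort token up in each dimension table in turn: common keys,
 * hpp (overhead-style) columns, branch stack keys (SORT_MODE__BRANCH
 * only), then memory keys (SORT_MODE__MEMORY only).  Tokens are matched
 * by case-insensitive prefix; anything left over is tried as a dynamic
 * tracepoint field.
 */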
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
                        struct evlist *evlist,
                        int level)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
                struct sort_dimension *sd = &common_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sd->entry == &sort_parent) {
                        int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
                        if (ret) {
                                char err[BUFSIZ];

                                regerror(ret, &parent_regex, err, sizeof(err));
                                pr_err("Invalid regex: %s\n%s", parent_pattern, err);
                                return -EINVAL;
                        }
                        list->parent = 1;
                } else if (sd->entry == &sort_sym) {
                        list->sym = 1;
                        /*
                         * perf diff displays the performance difference among
                         * two or more perf.data files.  Those files could come
                         * from different binaries, so we should not compare
                         * their IPs, but the symbol names.
                         */
                        if (sort__mode == SORT_MODE__DIFF)
                                sd->entry->se_collapse = sort__sym_sort;

                } else if (sd->entry == &sort_dso) {
                        list->dso = 1;
                } else if (sd->entry == &sort_socket) {
                        list->socket = 1;
                } else if (sd->entry == &sort_thread) {
                        list->thread = 1;
                } else if (sd->entry == &sort_comm) {
                        list->comm = 1;
                }

                return __sort_dimension__add(sd, list, level);
        }

        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
                struct hpp_dimension *hd = &hpp_sort_dimensions[i];

                if (strncasecmp(tok, hd->name, strlen(tok)))
                        continue;

                return __hpp_dimension__add(hd, list, level);
        }

        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
                struct sort_dimension *sd = &bstack_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sort__mode != SORT_MODE__BRANCH)
                        return -EINVAL;

                if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
                        list->sym = 1;

                __sort_dimension__add(sd, list, level);
                return 0;
        }

        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
                struct sort_dimension *sd = &memory_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sort__mode != SORT_MODE__MEMORY)
                        return -EINVAL;

                if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
                        return -EINVAL;

                if (sd->entry == &sort_mem_daddr_sym)
                        list->sym = 1;

                __sort_dimension__add(sd, list, level);
                return 0;
        }

        if (!add_dynamic_entry(evlist, tok, level))
                return 0;

        return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
                           struct evlist *evlist)
{
        char *tmp, *tok;
        int ret = 0;
        int level = 0;
        int next_level = 1;
        bool in_group = false;

        do {
                tok = str;
                tmp = strpbrk(str, "{}, ");
                if (tmp) {
                        if (in_group)
                                next_level = level;
                        else
                                next_level = level + 1;

                        if (*tmp == '{')
                                in_group = true;
                        else if (*tmp == '}')
                                in_group = false;

                        *tmp = '\0';
                        str = tmp + 1;
                }

                if (*tok) {
                        ret = sort_dimension__add(list, tok, evlist, level);
                        if (ret == -EINVAL) {
                                if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
                                        pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
                                else
                                        pr_err("Invalid --sort key: `%s'", tok);
                                break;
                        } else if (ret == -ESRCH) {
                                pr_err("Unknown --sort key: `%s'", tok);
                                break;
                        }
                }

                level = next_level;
        } while (tmp);

        return ret;
}
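
/*
 * Pick the default sort order for the current sort mode.  If every event
 * in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT so the
 * "trace" key (or "trace_fields" when raw trace output was requested via
 * symbol_conf.raw_trace) is used instead.
 */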
static const char *get_default_sort_order(struct evlist *evlist)
{
        const char *default_sort_orders[] = {
                default_sort_order,
                default_branch_sort_order,
                default_mem_sort_order,
                default_top_sort_order,
                default_diff_sort_order,
                default_tracepoint_sort_order,
        };
        bool use_trace = true;
        struct evsel *evsel;

        BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

        if (evlist == NULL || perf_evlist__empty(evlist))
                goto out_no_evlist;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
                        use_trace = false;
                        break;
                }
        }

        if (use_trace) {
                sort__mode = SORT_MODE__TRACEPOINT;
                if (symbol_conf.raw_trace)
                        return "trace_fields";
        }
out_no_evlist:
        return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
        char *new_sort_order;

        /*
         * Append a '+'-prefixed sort order to the default sort
         * order string.
         */
        if (!sort_order || is_strict_order(sort_order))
                return 0;

        if (sort_order[1] == '\0') {
                pr_err("Invalid --sort key: `+'");
                return -EINVAL;
        }

        /*
         * We allocate a new sort_order string, but we never free it,
         * because it is referenced throughout the rest of the code.
         */
        if (asprintf(&new_sort_order, "%s,%s",
                     get_default_sort_order(evlist), sort_order + 1) < 0) {
                pr_err("Not enough memory to set up --sort");
                return -ENOMEM;
        }

        sort_order = new_sort_order;
        return 0;
}

/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
        char *n;

        if (!str || strstr(str, pre))
                return str;

        if (asprintf(&n, "%s,%s", pre, str) < 0)
                return NULL;

        free(str);
        return n;
}

static char *setup_overhead(char *keys)
{
        if (sort__mode == SORT_MODE__DIFF)
                return keys;

        keys = prefix_if_not_in("overhead", keys);

        if (symbol_conf.cumulate_callchain)
                keys = prefix_if_not_in("overhead_children", keys);

        return keys;
}
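
/*
 * Build the sort key list: honor a '+'-prefixed --sort order by appending
 * it to the defaults, fall back to the mode's default order when no sort
 * order was given, and (unless a strict --fields order is in effect)
 * prepend the overhead key(s) for backward compatibility.
 */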
static int __setup_sorting(struct evlist *evlist)
{
        char *str;
        const char *sort_keys;
        int ret = 0;

        ret = setup_sort_order(evlist);
        if (ret)
                return ret;

        sort_keys = sort_order;
        if (sort_keys == NULL) {
                if (is_strict_order(field_order)) {
                        /*
                         * If user specified field order but no sort order,
                         * we'll honor it and not add default sort orders.
                         */
                        return 0;
                }

                sort_keys = get_default_sort_order(evlist);
        }

        str = strdup(sort_keys);
        if (str == NULL) {
                pr_err("Not enough memory to setup sort keys");
                return -ENOMEM;
        }

        /*
         * Prepend overhead fields for backward compatibility.
         */
        if (!is_strict_order(field_order)) {
                str = setup_overhead(str);
                if (str == NULL) {
                        pr_err("Not enough memory to setup overhead keys");
                        return -ENOMEM;
                }
        }

        ret = setup_sort_list(&perf_hpp_list, str, evlist);

        free(str);
        return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
        struct perf_hpp_fmt *fmt;
        struct hpp_sort_entry *hse;

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                hse = container_of(fmt, struct hpp_sort_entry, hpp);
                if (hse->se->se_width_idx == idx) {
                        fmt->elide = elide;
                        break;
                }
        }
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
        if (list && strlist__nr_entries(list) == 1) {
                if (fp != NULL)
                        fprintf(fp, "# %s: %s\n", list_name,
                                strlist__entry(list, 0)->s);
                return true;
        }
        return false;
}

static bool get_elide(int idx, FILE *output)
{
        switch (idx) {
        case HISTC_SYMBOL:
                return __get_elide(symbol_conf.sym_list, "symbol", output);
        case HISTC_DSO:
                return __get_elide(symbol_conf.dso_list, "dso", output);
        case HISTC_COMM:
                return __get_elide(symbol_conf.comm_list, "comm", output);
        default:
                break;
        }

        if (sort__mode != SORT_MODE__BRANCH)
                return false;

        switch (idx) {
        case HISTC_SYMBOL_FROM:
                return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
        case HISTC_SYMBOL_TO:
                return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
        case HISTC_DSO_FROM:
                return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
        case HISTC_DSO_TO:
                return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
        default:
                break;
        }

        return false;
}
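
/*
 * Elide (hide) a column when its filter list contains exactly one entry,
 * since every remaining hist entry would show the same value; __get_elide()
 * also notes the filter in the report header.  If that would elide every
 * sort column, un-elide them all so something is still displayed.
 */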
void sort__setup_elide(FILE *output)
{
        struct perf_hpp_fmt *fmt;
        struct hpp_sort_entry *hse;

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                hse = container_of(fmt, struct hpp_sort_entry, hpp);
                fmt->elide = get_elide(hse->se->se_width_idx, output);
        }

        /*
         * It makes no sense to elide all of the sort entries;
         * just revert them so they show up again.
         */
        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                if (!fmt->elide)
                        return;
        }

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                fmt->elide = false;
        }
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
                struct sort_dimension *sd = &common_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                return __sort_dimension__add_output(list, sd);
        }

        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
                struct hpp_dimension *hd = &hpp_sort_dimensions[i];

                if (strncasecmp(tok, hd->name, strlen(tok)))
                        continue;

                return __hpp_dimension__add_output(list, hd);
        }

        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
                struct sort_dimension *sd = &bstack_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                return __sort_dimension__add_output(list, sd);
        }

        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
                struct sort_dimension *sd = &memory_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                return __sort_dimension__add_output(list, sd);
        }

        return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
        char *tmp, *tok;
        int ret = 0;

        for (tok = strtok_r(str, ", ", &tmp);
                        tok; tok = strtok_r(NULL, ", ", &tmp)) {
                ret = output_field_add(list, tok);
                if (ret == -EINVAL) {
                        ui__error("Invalid --fields key: `%s'", tok);
                        break;
                } else if (ret == -ESRCH) {
                        ui__error("Unknown --fields key: `%s'", tok);
                        break;
                }
        }

        return ret;
}

void reset_dimensions(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
                common_sort_dimensions[i].taken = 0;

        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
                hpp_sort_dimensions[i].taken = 0;

        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
                bstack_sort_dimensions[i].taken = 0;

        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
                memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
        return order && (*order != '+');
}

static int __setup_output_field(void)
{
        char *str, *strp;
        int ret = -EINVAL;

        if (field_order == NULL)
                return 0;

        strp = str = strdup(field_order);
        if (str == NULL) {
                pr_err("Not enough memory to setup output fields");
                return -ENOMEM;
        }

        if (!is_strict_order(field_order))
                strp++;

        if (!strlen(strp)) {
                pr_err("Invalid --fields key: `+'");
                goto out;
        }

        ret = setup_output_list(&perf_hpp_list, strp);

out:
        free(str);
        return ret;
}
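
/*
 * Main entry point for sort/output setup: build the sort keys, add the
 * "parent" key when a non-default parent pattern was given, then build
 * the output field list and mirror sort keys and output fields into each
 * other.
 */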
int setup_sorting(struct evlist *evlist)
{
        int err;

        err = __setup_sorting(evlist);
        if (err < 0)
                return err;

        if (parent_pattern != default_parent_pattern) {
                err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
                if (err < 0)
                        return err;
        }

        reset_dimensions();

        /*
         * perf diff doesn't use default hpp output fields.
         */
        if (sort__mode != SORT_MODE__DIFF)
                perf_hpp__init();

        err = __setup_output_field();
        if (err < 0)
                return err;

        /* copy sort keys to output fields */
        perf_hpp__setup_output_field(&perf_hpp_list);
        /* and then copy output fields to sort keys */
        perf_hpp__append_sort_keys(&perf_hpp_list);

        /* setup hists-specific output fields */
        if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
                return -1;

        return 0;
}

void reset_output_field(void)
{
        perf_hpp_list.need_collapse = 0;
        perf_hpp_list.parent = 0;
        perf_hpp_list.sym = 0;
        perf_hpp_list.dso = 0;

        field_order = NULL;
        sort_order = NULL;

        reset_dimensions();
        perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
        if (*llen >= 75) {
                strbuf_addstr(sb, "\n\t\t\t ");
                *llen = INDENT;
        }
        strbuf_addf(sb, " %s", str);
        *llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
                            int *llen)
{
        int i;

        for (i = 0; i < n; i++)
                add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
                                int *llen)
{
        int i;

        for (i = 0; i < n; i++)
                add_key(sb, s[i].name, llen);
}

const char *sort_help(const char *prefix)
{
        struct strbuf sb;
        char *s;
        int len = strlen(prefix) + INDENT;

        strbuf_init(&sb, 300);
        strbuf_addstr(&sb, prefix);
        add_hpp_sort_string(&sb, hpp_sort_dimensions,
                            ARRAY_SIZE(hpp_sort_dimensions), &len);
        add_sort_string(&sb, common_sort_dimensions,
                        ARRAY_SIZE(common_sort_dimensions), &len);
        add_sort_string(&sb, bstack_sort_dimensions,
                        ARRAY_SIZE(bstack_sort_dimensions), &len);
        add_sort_string(&sb, memory_sort_dimensions,
                        ARRAY_SIZE(memory_sort_dimensions), &len);
        s = strbuf_detach(&sb, NULL);
        strbuf_release(&sb);
        return s;
}