1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <linux/mman.h> 6 #include <linux/time64.h> 7 #include "debug.h" 8 #include "sort.h" 9 #include "hist.h" 10 #include "cacheline.h" 11 #include "comm.h" 12 #include "map.h" 13 #include "symbol.h" 14 #include "thread.h" 15 #include "evsel.h" 16 #include "evlist.h" 17 #include "srcline.h" 18 #include "strlist.h" 19 #include "strbuf.h" 20 #include <traceevent/event-parse.h> 21 #include "mem-events.h" 22 #include "annotate.h" 23 #include "time-utils.h" 24 #include <linux/kernel.h> 25 #include <linux/string.h> 26 27 regex_t parent_regex; 28 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 29 const char *parent_pattern = default_parent_pattern; 30 const char *default_sort_order = "comm,dso,symbol"; 31 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 32 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; 33 const char default_top_sort_order[] = "dso,symbol"; 34 const char default_diff_sort_order[] = "dso,symbol"; 35 const char default_tracepoint_sort_order[] = "trace"; 36 const char *sort_order; 37 const char *field_order; 38 regex_t ignore_callees_regex; 39 int have_ignore_callees = 0; 40 enum sort_mode sort__mode = SORT_MODE__NORMAL; 41 42 /* 43 * Replaces all occurrences of a char used with the: 44 * 45 * -t, --field-separator 46 * 47 * option, that uses a special separator character and don't pad with spaces, 48 * replacing all occurrences of this separator in symbol names (and other 49 * output) with a '.' character, that thus it's the only non valid separator. 50 */ 51 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 52 { 53 int n; 54 va_list ap; 55 56 va_start(ap, fmt); 57 n = vsnprintf(bf, size, fmt, ap); 58 if (symbol_conf.field_sep && n > 0) { 59 char *sep = bf; 60 61 while (1) { 62 sep = strchr(sep, *symbol_conf.field_sep); 63 if (sep == NULL) 64 break; 65 *sep = '.'; 66 } 67 } 68 va_end(ap); 69 70 if (n >= (int)size) 71 return size - 1; 72 return n; 73 } 74 75 static int64_t cmp_null(const void *l, const void *r) 76 { 77 if (!l && !r) 78 return 0; 79 else if (!l) 80 return -1; 81 else 82 return 1; 83 } 84 85 /* --sort pid */ 86 87 static int64_t 88 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 89 { 90 return right->thread->tid - left->thread->tid; 91 } 92 93 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 94 size_t size, unsigned int width) 95 { 96 const char *comm = thread__comm_str(he->thread); 97 98 width = max(7U, width) - 8; 99 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 100 width, width, comm ?: ""); 101 } 102 103 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 104 { 105 const struct thread *th = arg; 106 107 if (type != HIST_FILTER__THREAD) 108 return -1; 109 110 return th && he->thread != th; 111 } 112 113 struct sort_entry sort_thread = { 114 .se_header = " Pid:Command", 115 .se_cmp = sort__thread_cmp, 116 .se_snprintf = hist_entry__thread_snprintf, 117 .se_filter = hist_entry__thread_filter, 118 .se_width_idx = HISTC_THREAD, 119 }; 120 121 /* --sort comm */ 122 123 /* 124 * We can't use pointer comparison in functions below, 125 * because it gives different results based on pointer 126 * values, which could break some sorting assumptions. 
127 */ 128 static int64_t 129 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 130 { 131 return strcmp(comm__str(right->comm), comm__str(left->comm)); 132 } 133 134 static int64_t 135 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 136 { 137 return strcmp(comm__str(right->comm), comm__str(left->comm)); 138 } 139 140 static int64_t 141 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 142 { 143 return strcmp(comm__str(right->comm), comm__str(left->comm)); 144 } 145 146 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 147 size_t size, unsigned int width) 148 { 149 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 150 } 151 152 struct sort_entry sort_comm = { 153 .se_header = "Command", 154 .se_cmp = sort__comm_cmp, 155 .se_collapse = sort__comm_collapse, 156 .se_sort = sort__comm_sort, 157 .se_snprintf = hist_entry__comm_snprintf, 158 .se_filter = hist_entry__thread_filter, 159 .se_width_idx = HISTC_COMM, 160 }; 161 162 /* --sort dso */ 163 164 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 165 { 166 struct dso *dso_l = map_l ? map_l->dso : NULL; 167 struct dso *dso_r = map_r ? map_r->dso : NULL; 168 const char *dso_name_l, *dso_name_r; 169 170 if (!dso_l || !dso_r) 171 return cmp_null(dso_r, dso_l); 172 173 if (verbose > 0) { 174 dso_name_l = dso_l->long_name; 175 dso_name_r = dso_r->long_name; 176 } else { 177 dso_name_l = dso_l->short_name; 178 dso_name_r = dso_r->short_name; 179 } 180 181 return strcmp(dso_name_l, dso_name_r); 182 } 183 184 static int64_t 185 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 186 { 187 return _sort__dso_cmp(right->ms.map, left->ms.map); 188 } 189 190 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 191 size_t size, unsigned int width) 192 { 193 if (map && map->dso) { 194 const char *dso_name = verbose > 0 ? 
map->dso->long_name : 195 map->dso->short_name; 196 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 197 } 198 199 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 200 } 201 202 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 203 size_t size, unsigned int width) 204 { 205 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 206 } 207 208 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 209 { 210 const struct dso *dso = arg; 211 212 if (type != HIST_FILTER__DSO) 213 return -1; 214 215 return dso && (!he->ms.map || he->ms.map->dso != dso); 216 } 217 218 struct sort_entry sort_dso = { 219 .se_header = "Shared Object", 220 .se_cmp = sort__dso_cmp, 221 .se_snprintf = hist_entry__dso_snprintf, 222 .se_filter = hist_entry__dso_filter, 223 .se_width_idx = HISTC_DSO, 224 }; 225 226 /* --sort symbol */ 227 228 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 229 { 230 return (int64_t)(right_ip - left_ip); 231 } 232 233 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 234 { 235 if (!sym_l || !sym_r) 236 return cmp_null(sym_l, sym_r); 237 238 if (sym_l == sym_r) 239 return 0; 240 241 if (sym_l->inlined || sym_r->inlined) { 242 int ret = strcmp(sym_l->name, sym_r->name); 243 244 if (ret) 245 return ret; 246 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) 247 return 0; 248 } 249 250 if (sym_l->start != sym_r->start) 251 return (int64_t)(sym_r->start - sym_l->start); 252 253 return (int64_t)(sym_r->end - sym_l->end); 254 } 255 256 static int64_t 257 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 258 { 259 int64_t ret; 260 261 if (!left->ms.sym && !right->ms.sym) 262 return _sort__addr_cmp(left->ip, right->ip); 263 264 /* 265 * comparing symbol address alone is not enough since it's a 266 * relative address within a dso. 267 */ 268 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 269 ret = sort__dso_cmp(left, right); 270 if (ret != 0) 271 return ret; 272 } 273 274 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 275 } 276 277 static int64_t 278 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 279 { 280 if (!left->ms.sym || !right->ms.sym) 281 return cmp_null(left->ms.sym, right->ms.sym); 282 283 return strcmp(right->ms.sym->name, left->ms.sym->name); 284 } 285 286 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 287 u64 ip, char level, char *bf, size_t size, 288 unsigned int width) 289 { 290 size_t ret = 0; 291 292 if (verbose > 0) { 293 char o = map ? 
dso__symtab_origin(map->dso) : '!'; 294 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 295 BITS_PER_LONG / 4 + 2, ip, o); 296 } 297 298 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 299 if (sym && map) { 300 if (sym->type == STT_OBJECT) { 301 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 302 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 303 ip - map->unmap_ip(map, sym->start)); 304 } else { 305 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 306 width - ret, 307 sym->name); 308 if (sym->inlined) 309 ret += repsep_snprintf(bf + ret, size - ret, 310 " (inlined)"); 311 } 312 } else { 313 size_t len = BITS_PER_LONG / 4; 314 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 315 len, ip); 316 } 317 318 return ret; 319 } 320 321 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 322 size_t size, unsigned int width) 323 { 324 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 325 he->level, bf, size, width); 326 } 327 328 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 329 { 330 const char *sym = arg; 331 332 if (type != HIST_FILTER__SYMBOL) 333 return -1; 334 335 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 336 } 337 338 struct sort_entry sort_sym = { 339 .se_header = "Symbol", 340 .se_cmp = sort__sym_cmp, 341 .se_sort = sort__sym_sort, 342 .se_snprintf = hist_entry__sym_snprintf, 343 .se_filter = hist_entry__sym_filter, 344 .se_width_idx = HISTC_SYMBOL, 345 }; 346 347 /* --sort srcline */ 348 349 char *hist_entry__srcline(struct hist_entry *he) 350 { 351 return map__srcline(he->ms.map, he->ip, he->ms.sym); 352 } 353 354 static int64_t 355 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 356 { 357 if (!left->srcline) 358 left->srcline = hist_entry__srcline(left); 359 if (!right->srcline) 360 right->srcline = hist_entry__srcline(right); 361 362 return strcmp(right->srcline, left->srcline); 363 } 364 365 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 366 size_t size, unsigned int width) 367 { 368 if (!he->srcline) 369 he->srcline = hist_entry__srcline(he); 370 371 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 372 } 373 374 struct sort_entry sort_srcline = { 375 .se_header = "Source:Line", 376 .se_cmp = sort__srcline_cmp, 377 .se_snprintf = hist_entry__srcline_snprintf, 378 .se_width_idx = HISTC_SRCLINE, 379 }; 380 381 /* --sort srcline_from */ 382 383 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 384 { 385 return map__srcline(ams->map, ams->al_addr, ams->sym); 386 } 387 388 static int64_t 389 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 390 { 391 if (!left->branch_info->srcline_from) 392 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 393 394 if (!right->branch_info->srcline_from) 395 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 396 397 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 398 } 399 400 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 401 size_t size, unsigned int width) 402 { 403 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 404 } 405 406 struct sort_entry sort_srcline_from = { 407 .se_header = "From Source:Line", 408 .se_cmp = sort__srcline_from_cmp, 409 .se_snprintf = hist_entry__srcline_from_snprintf, 410 .se_width_idx = HISTC_SRCLINE_FROM, 411 }; 
412 413 /* --sort srcline_to */ 414 415 static int64_t 416 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 417 { 418 if (!left->branch_info->srcline_to) 419 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 420 421 if (!right->branch_info->srcline_to) 422 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 423 424 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 425 } 426 427 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 428 size_t size, unsigned int width) 429 { 430 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 431 } 432 433 struct sort_entry sort_srcline_to = { 434 .se_header = "To Source:Line", 435 .se_cmp = sort__srcline_to_cmp, 436 .se_snprintf = hist_entry__srcline_to_snprintf, 437 .se_width_idx = HISTC_SRCLINE_TO, 438 }; 439 440 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 441 size_t size, unsigned int width) 442 { 443 444 struct symbol *sym = he->ms.sym; 445 struct annotation *notes; 446 double ipc = 0.0, coverage = 0.0; 447 char tmp[64]; 448 449 if (!sym) 450 return repsep_snprintf(bf, size, "%-*s", width, "-"); 451 452 notes = symbol__annotation(sym); 453 454 if (notes->hit_cycles) 455 ipc = notes->hit_insn / ((double)notes->hit_cycles); 456 457 if (notes->total_insn) { 458 coverage = notes->cover_insn * 100.0 / 459 ((double)notes->total_insn); 460 } 461 462 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 463 return repsep_snprintf(bf, size, "%-*s", width, tmp); 464 } 465 466 struct sort_entry sort_sym_ipc = { 467 .se_header = "IPC [IPC Coverage]", 468 .se_cmp = sort__sym_cmp, 469 .se_snprintf = hist_entry__sym_ipc_snprintf, 470 .se_width_idx = HISTC_SYMBOL_IPC, 471 }; 472 473 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 474 __maybe_unused, 475 char *bf, size_t size, 476 unsigned int width) 477 { 478 char tmp[64]; 479 480 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 481 return repsep_snprintf(bf, size, "%-*s", width, tmp); 482 } 483 484 struct sort_entry sort_sym_ipc_null = { 485 .se_header = "IPC [IPC Coverage]", 486 .se_cmp = sort__sym_cmp, 487 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 488 .se_width_idx = HISTC_SYMBOL_IPC, 489 }; 490 491 /* --sort srcfile */ 492 493 static char no_srcfile[1]; 494 495 static char *hist_entry__get_srcfile(struct hist_entry *e) 496 { 497 char *sf, *p; 498 struct map *map = e->ms.map; 499 500 if (!map) 501 return no_srcfile; 502 503 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 504 e->ms.sym, false, true, true, e->ip); 505 if (!strcmp(sf, SRCLINE_UNKNOWN)) 506 return no_srcfile; 507 p = strchr(sf, ':'); 508 if (p && *sf) { 509 *p = 0; 510 return sf; 511 } 512 free(sf); 513 return no_srcfile; 514 } 515 516 static int64_t 517 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 518 { 519 if (!left->srcfile) 520 left->srcfile = hist_entry__get_srcfile(left); 521 if (!right->srcfile) 522 right->srcfile = hist_entry__get_srcfile(right); 523 524 return strcmp(right->srcfile, left->srcfile); 525 } 526 527 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 528 size_t size, unsigned int width) 529 { 530 if (!he->srcfile) 531 he->srcfile = hist_entry__get_srcfile(he); 532 533 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 534 } 535 536 struct sort_entry sort_srcfile = { 537 .se_header = "Source File", 538 .se_cmp = 
sort__srcfile_cmp, 539 .se_snprintf = hist_entry__srcfile_snprintf, 540 .se_width_idx = HISTC_SRCFILE, 541 }; 542 543 /* --sort parent */ 544 545 static int64_t 546 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 547 { 548 struct symbol *sym_l = left->parent; 549 struct symbol *sym_r = right->parent; 550 551 if (!sym_l || !sym_r) 552 return cmp_null(sym_l, sym_r); 553 554 return strcmp(sym_r->name, sym_l->name); 555 } 556 557 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 558 size_t size, unsigned int width) 559 { 560 return repsep_snprintf(bf, size, "%-*.*s", width, width, 561 he->parent ? he->parent->name : "[other]"); 562 } 563 564 struct sort_entry sort_parent = { 565 .se_header = "Parent symbol", 566 .se_cmp = sort__parent_cmp, 567 .se_snprintf = hist_entry__parent_snprintf, 568 .se_width_idx = HISTC_PARENT, 569 }; 570 571 /* --sort cpu */ 572 573 static int64_t 574 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 575 { 576 return right->cpu - left->cpu; 577 } 578 579 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 580 size_t size, unsigned int width) 581 { 582 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 583 } 584 585 struct sort_entry sort_cpu = { 586 .se_header = "CPU", 587 .se_cmp = sort__cpu_cmp, 588 .se_snprintf = hist_entry__cpu_snprintf, 589 .se_width_idx = HISTC_CPU, 590 }; 591 592 /* --sort cgroup_id */ 593 594 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 595 { 596 return (int64_t)(right_dev - left_dev); 597 } 598 599 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 600 { 601 return (int64_t)(right_ino - left_ino); 602 } 603 604 static int64_t 605 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 606 { 607 int64_t ret; 608 609 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 610 if (ret != 0) 611 return ret; 612 613 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 614 left->cgroup_id.ino); 615 } 616 617 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 618 char *bf, size_t size, 619 unsigned int width __maybe_unused) 620 { 621 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 622 he->cgroup_id.ino); 623 } 624 625 struct sort_entry sort_cgroup_id = { 626 .se_header = "cgroup id (dev/inode)", 627 .se_cmp = sort__cgroup_id_cmp, 628 .se_snprintf = hist_entry__cgroup_id_snprintf, 629 .se_width_idx = HISTC_CGROUP_ID, 630 }; 631 632 /* --sort socket */ 633 634 static int64_t 635 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 636 { 637 return right->socket - left->socket; 638 } 639 640 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 641 size_t size, unsigned int width) 642 { 643 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 644 } 645 646 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 647 { 648 int sk = *(const int *)arg; 649 650 if (type != HIST_FILTER__SOCKET) 651 return -1; 652 653 return sk >= 0 && he->socket != sk; 654 } 655 656 struct sort_entry sort_socket = { 657 .se_header = "Socket", 658 .se_cmp = sort__socket_cmp, 659 .se_snprintf = hist_entry__socket_snprintf, 660 .se_filter = hist_entry__socket_filter, 661 .se_width_idx = HISTC_SOCKET, 662 }; 663 664 /* --sort time */ 665 666 static int64_t 667 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 668 { 669 return right->time - left->time; 670 } 671 672 static int 
hist_entry__time_snprintf(struct hist_entry *he, char *bf, 673 size_t size, unsigned int width) 674 { 675 char he_time[32]; 676 677 if (symbol_conf.nanosecs) 678 timestamp__scnprintf_nsec(he->time, he_time, 679 sizeof(he_time)); 680 else 681 timestamp__scnprintf_usec(he->time, he_time, 682 sizeof(he_time)); 683 684 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 685 } 686 687 struct sort_entry sort_time = { 688 .se_header = "Time", 689 .se_cmp = sort__time_cmp, 690 .se_snprintf = hist_entry__time_snprintf, 691 .se_width_idx = HISTC_TIME, 692 }; 693 694 /* --sort trace */ 695 696 static char *get_trace_output(struct hist_entry *he) 697 { 698 struct trace_seq seq; 699 struct evsel *evsel; 700 struct tep_record rec = { 701 .data = he->raw_data, 702 .size = he->raw_size, 703 }; 704 705 evsel = hists_to_evsel(he->hists); 706 707 trace_seq_init(&seq); 708 if (symbol_conf.raw_trace) { 709 tep_print_fields(&seq, he->raw_data, he->raw_size, 710 evsel->tp_format); 711 } else { 712 tep_print_event(evsel->tp_format->tep, 713 &seq, &rec, "%s", TEP_PRINT_INFO); 714 } 715 /* 716 * Trim the buffer, it starts at 4KB and we're not going to 717 * add anything more to this buffer. 718 */ 719 return realloc(seq.buffer, seq.len + 1); 720 } 721 722 static int64_t 723 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 724 { 725 struct evsel *evsel; 726 727 evsel = hists_to_evsel(left->hists); 728 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 729 return 0; 730 731 if (left->trace_output == NULL) 732 left->trace_output = get_trace_output(left); 733 if (right->trace_output == NULL) 734 right->trace_output = get_trace_output(right); 735 736 return strcmp(right->trace_output, left->trace_output); 737 } 738 739 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 740 size_t size, unsigned int width) 741 { 742 struct evsel *evsel; 743 744 evsel = hists_to_evsel(he->hists); 745 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 746 return scnprintf(bf, size, "%-.*s", width, "N/A"); 747 748 if (he->trace_output == NULL) 749 he->trace_output = get_trace_output(he); 750 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 751 } 752 753 struct sort_entry sort_trace = { 754 .se_header = "Trace output", 755 .se_cmp = sort__trace_cmp, 756 .se_snprintf = hist_entry__trace_snprintf, 757 .se_width_idx = HISTC_TRACE, 758 }; 759 760 /* sort keys for branch stacks */ 761 762 static int64_t 763 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 764 { 765 if (!left->branch_info || !right->branch_info) 766 return cmp_null(left->branch_info, right->branch_info); 767 768 return _sort__dso_cmp(left->branch_info->from.map, 769 right->branch_info->from.map); 770 } 771 772 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 773 size_t size, unsigned int width) 774 { 775 if (he->branch_info) 776 return _hist_entry__dso_snprintf(he->branch_info->from.map, 777 bf, size, width); 778 else 779 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 780 } 781 782 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 783 const void *arg) 784 { 785 const struct dso *dso = arg; 786 787 if (type != HIST_FILTER__DSO) 788 return -1; 789 790 return dso && (!he->branch_info || !he->branch_info->from.map || 791 he->branch_info->from.map->dso != dso); 792 } 793 794 static int64_t 795 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 796 { 797 if (!left->branch_info || !right->branch_info) 798 return 
cmp_null(left->branch_info, right->branch_info); 799 800 return _sort__dso_cmp(left->branch_info->to.map, 801 right->branch_info->to.map); 802 } 803 804 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 805 size_t size, unsigned int width) 806 { 807 if (he->branch_info) 808 return _hist_entry__dso_snprintf(he->branch_info->to.map, 809 bf, size, width); 810 else 811 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 812 } 813 814 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 815 const void *arg) 816 { 817 const struct dso *dso = arg; 818 819 if (type != HIST_FILTER__DSO) 820 return -1; 821 822 return dso && (!he->branch_info || !he->branch_info->to.map || 823 he->branch_info->to.map->dso != dso); 824 } 825 826 static int64_t 827 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 828 { 829 struct addr_map_symbol *from_l = &left->branch_info->from; 830 struct addr_map_symbol *from_r = &right->branch_info->from; 831 832 if (!left->branch_info || !right->branch_info) 833 return cmp_null(left->branch_info, right->branch_info); 834 835 from_l = &left->branch_info->from; 836 from_r = &right->branch_info->from; 837 838 if (!from_l->sym && !from_r->sym) 839 return _sort__addr_cmp(from_l->addr, from_r->addr); 840 841 return _sort__sym_cmp(from_l->sym, from_r->sym); 842 } 843 844 static int64_t 845 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 846 { 847 struct addr_map_symbol *to_l, *to_r; 848 849 if (!left->branch_info || !right->branch_info) 850 return cmp_null(left->branch_info, right->branch_info); 851 852 to_l = &left->branch_info->to; 853 to_r = &right->branch_info->to; 854 855 if (!to_l->sym && !to_r->sym) 856 return _sort__addr_cmp(to_l->addr, to_r->addr); 857 858 return _sort__sym_cmp(to_l->sym, to_r->sym); 859 } 860 861 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 862 size_t size, unsigned int width) 863 { 864 if (he->branch_info) { 865 struct addr_map_symbol *from = &he->branch_info->from; 866 867 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 868 he->level, bf, size, width); 869 } 870 871 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 872 } 873 874 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 875 size_t size, unsigned int width) 876 { 877 if (he->branch_info) { 878 struct addr_map_symbol *to = &he->branch_info->to; 879 880 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 881 he->level, bf, size, width); 882 } 883 884 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 885 } 886 887 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 888 const void *arg) 889 { 890 const char *sym = arg; 891 892 if (type != HIST_FILTER__SYMBOL) 893 return -1; 894 895 return sym && !(he->branch_info && he->branch_info->from.sym && 896 strstr(he->branch_info->from.sym->name, sym)); 897 } 898 899 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 900 const void *arg) 901 { 902 const char *sym = arg; 903 904 if (type != HIST_FILTER__SYMBOL) 905 return -1; 906 907 return sym && !(he->branch_info && he->branch_info->to.sym && 908 strstr(he->branch_info->to.sym->name, sym)); 909 } 910 911 struct sort_entry sort_dso_from = { 912 .se_header = "Source Shared Object", 913 .se_cmp = sort__dso_from_cmp, 914 .se_snprintf = hist_entry__dso_from_snprintf, 915 .se_filter = hist_entry__dso_from_filter, 916 .se_width_idx = HISTC_DSO_FROM, 917 }; 918 919 struct sort_entry 
sort_dso_to = { 920 .se_header = "Target Shared Object", 921 .se_cmp = sort__dso_to_cmp, 922 .se_snprintf = hist_entry__dso_to_snprintf, 923 .se_filter = hist_entry__dso_to_filter, 924 .se_width_idx = HISTC_DSO_TO, 925 }; 926 927 struct sort_entry sort_sym_from = { 928 .se_header = "Source Symbol", 929 .se_cmp = sort__sym_from_cmp, 930 .se_snprintf = hist_entry__sym_from_snprintf, 931 .se_filter = hist_entry__sym_from_filter, 932 .se_width_idx = HISTC_SYMBOL_FROM, 933 }; 934 935 struct sort_entry sort_sym_to = { 936 .se_header = "Target Symbol", 937 .se_cmp = sort__sym_to_cmp, 938 .se_snprintf = hist_entry__sym_to_snprintf, 939 .se_filter = hist_entry__sym_to_filter, 940 .se_width_idx = HISTC_SYMBOL_TO, 941 }; 942 943 static int64_t 944 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 945 { 946 unsigned char mp, p; 947 948 if (!left->branch_info || !right->branch_info) 949 return cmp_null(left->branch_info, right->branch_info); 950 951 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 952 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 953 return mp || p; 954 } 955 956 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 957 size_t size, unsigned int width){ 958 static const char *out = "N/A"; 959 960 if (he->branch_info) { 961 if (he->branch_info->flags.predicted) 962 out = "N"; 963 else if (he->branch_info->flags.mispred) 964 out = "Y"; 965 } 966 967 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 968 } 969 970 static int64_t 971 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 972 { 973 if (!left->branch_info || !right->branch_info) 974 return cmp_null(left->branch_info, right->branch_info); 975 976 return left->branch_info->flags.cycles - 977 right->branch_info->flags.cycles; 978 } 979 980 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 981 size_t size, unsigned int width) 982 { 983 if (!he->branch_info) 984 return scnprintf(bf, size, "%-.*s", width, "N/A"); 985 if (he->branch_info->flags.cycles == 0) 986 return repsep_snprintf(bf, size, "%-*s", width, "-"); 987 return repsep_snprintf(bf, size, "%-*hd", width, 988 he->branch_info->flags.cycles); 989 } 990 991 struct sort_entry sort_cycles = { 992 .se_header = "Basic Block Cycles", 993 .se_cmp = sort__cycles_cmp, 994 .se_snprintf = hist_entry__cycles_snprintf, 995 .se_width_idx = HISTC_CYCLES, 996 }; 997 998 /* --sort daddr_sym */ 999 int64_t 1000 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1001 { 1002 uint64_t l = 0, r = 0; 1003 1004 if (left->mem_info) 1005 l = left->mem_info->daddr.addr; 1006 if (right->mem_info) 1007 r = right->mem_info->daddr.addr; 1008 1009 return (int64_t)(r - l); 1010 } 1011 1012 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1013 size_t size, unsigned int width) 1014 { 1015 uint64_t addr = 0; 1016 struct map *map = NULL; 1017 struct symbol *sym = NULL; 1018 1019 if (he->mem_info) { 1020 addr = he->mem_info->daddr.addr; 1021 map = he->mem_info->daddr.map; 1022 sym = he->mem_info->daddr.sym; 1023 } 1024 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 1025 width); 1026 } 1027 1028 int64_t 1029 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1030 { 1031 uint64_t l = 0, r = 0; 1032 1033 if (left->mem_info) 1034 l = left->mem_info->iaddr.addr; 1035 if (right->mem_info) 1036 r = right->mem_info->iaddr.addr; 1037 1038 return (int64_t)(r - l); 1039 } 1040 1041 static int 
hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1042 size_t size, unsigned int width) 1043 { 1044 uint64_t addr = 0; 1045 struct map *map = NULL; 1046 struct symbol *sym = NULL; 1047 1048 if (he->mem_info) { 1049 addr = he->mem_info->iaddr.addr; 1050 map = he->mem_info->iaddr.map; 1051 sym = he->mem_info->iaddr.sym; 1052 } 1053 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 1054 width); 1055 } 1056 1057 static int64_t 1058 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1059 { 1060 struct map *map_l = NULL; 1061 struct map *map_r = NULL; 1062 1063 if (left->mem_info) 1064 map_l = left->mem_info->daddr.map; 1065 if (right->mem_info) 1066 map_r = right->mem_info->daddr.map; 1067 1068 return _sort__dso_cmp(map_l, map_r); 1069 } 1070 1071 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1072 size_t size, unsigned int width) 1073 { 1074 struct map *map = NULL; 1075 1076 if (he->mem_info) 1077 map = he->mem_info->daddr.map; 1078 1079 return _hist_entry__dso_snprintf(map, bf, size, width); 1080 } 1081 1082 static int64_t 1083 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1084 { 1085 union perf_mem_data_src data_src_l; 1086 union perf_mem_data_src data_src_r; 1087 1088 if (left->mem_info) 1089 data_src_l = left->mem_info->data_src; 1090 else 1091 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1092 1093 if (right->mem_info) 1094 data_src_r = right->mem_info->data_src; 1095 else 1096 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1097 1098 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1099 } 1100 1101 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1102 size_t size, unsigned int width) 1103 { 1104 char out[10]; 1105 1106 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1107 return repsep_snprintf(bf, size, "%.*s", width, out); 1108 } 1109 1110 static int64_t 1111 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1112 { 1113 union perf_mem_data_src data_src_l; 1114 union perf_mem_data_src data_src_r; 1115 1116 if (left->mem_info) 1117 data_src_l = left->mem_info->data_src; 1118 else 1119 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1120 1121 if (right->mem_info) 1122 data_src_r = right->mem_info->data_src; 1123 else 1124 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1125 1126 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1127 } 1128 1129 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1130 size_t size, unsigned int width) 1131 { 1132 char out[64]; 1133 1134 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1135 return repsep_snprintf(bf, size, "%-*s", width, out); 1136 } 1137 1138 static int64_t 1139 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1140 { 1141 union perf_mem_data_src data_src_l; 1142 union perf_mem_data_src data_src_r; 1143 1144 if (left->mem_info) 1145 data_src_l = left->mem_info->data_src; 1146 else 1147 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1148 1149 if (right->mem_info) 1150 data_src_r = right->mem_info->data_src; 1151 else 1152 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1153 1154 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1155 } 1156 1157 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1158 size_t size, unsigned int width) 1159 { 1160 char out[64]; 1161 1162 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1163 return repsep_snprintf(bf, size, "%-*s", width, out); 1164 } 1165 1166 static int64_t 1167 sort__snoop_cmp(struct hist_entry *left, 
struct hist_entry *right) 1168 { 1169 union perf_mem_data_src data_src_l; 1170 union perf_mem_data_src data_src_r; 1171 1172 if (left->mem_info) 1173 data_src_l = left->mem_info->data_src; 1174 else 1175 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1176 1177 if (right->mem_info) 1178 data_src_r = right->mem_info->data_src; 1179 else 1180 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1181 1182 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1183 } 1184 1185 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1186 size_t size, unsigned int width) 1187 { 1188 char out[64]; 1189 1190 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1191 return repsep_snprintf(bf, size, "%-*s", width, out); 1192 } 1193 1194 int64_t 1195 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1196 { 1197 u64 l, r; 1198 struct map *l_map, *r_map; 1199 1200 if (!left->mem_info) return -1; 1201 if (!right->mem_info) return 1; 1202 1203 /* group event types together */ 1204 if (left->cpumode > right->cpumode) return -1; 1205 if (left->cpumode < right->cpumode) return 1; 1206 1207 l_map = left->mem_info->daddr.map; 1208 r_map = right->mem_info->daddr.map; 1209 1210 /* if both are NULL, jump to sort on al_addr instead */ 1211 if (!l_map && !r_map) 1212 goto addr; 1213 1214 if (!l_map) return -1; 1215 if (!r_map) return 1; 1216 1217 if (l_map->maj > r_map->maj) return -1; 1218 if (l_map->maj < r_map->maj) return 1; 1219 1220 if (l_map->min > r_map->min) return -1; 1221 if (l_map->min < r_map->min) return 1; 1222 1223 if (l_map->ino > r_map->ino) return -1; 1224 if (l_map->ino < r_map->ino) return 1; 1225 1226 if (l_map->ino_generation > r_map->ino_generation) return -1; 1227 if (l_map->ino_generation < r_map->ino_generation) return 1; 1228 1229 /* 1230 * Addresses with no major/minor numbers are assumed to be 1231 * anonymous in userspace. Sort those on pid then address. 1232 * 1233 * The kernel and non-zero major/minor mapped areas are 1234 * assumed to be unity mapped. Sort those on address. 
1235 */ 1236 1237 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1238 (!(l_map->flags & MAP_SHARED)) && 1239 !l_map->maj && !l_map->min && !l_map->ino && 1240 !l_map->ino_generation) { 1241 /* userspace anonymous */ 1242 1243 if (left->thread->pid_ > right->thread->pid_) return -1; 1244 if (left->thread->pid_ < right->thread->pid_) return 1; 1245 } 1246 1247 addr: 1248 /* al_addr does all the right addr - start + offset calculations */ 1249 l = cl_address(left->mem_info->daddr.al_addr); 1250 r = cl_address(right->mem_info->daddr.al_addr); 1251 1252 if (l > r) return -1; 1253 if (l < r) return 1; 1254 1255 return 0; 1256 } 1257 1258 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1259 size_t size, unsigned int width) 1260 { 1261 1262 uint64_t addr = 0; 1263 struct map *map = NULL; 1264 struct symbol *sym = NULL; 1265 char level = he->level; 1266 1267 if (he->mem_info) { 1268 addr = cl_address(he->mem_info->daddr.al_addr); 1269 map = he->mem_info->daddr.map; 1270 sym = he->mem_info->daddr.sym; 1271 1272 /* print [s] for shared data mmaps */ 1273 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1274 map && !(map->prot & PROT_EXEC) && 1275 (map->flags & MAP_SHARED) && 1276 (map->maj || map->min || map->ino || 1277 map->ino_generation)) 1278 level = 's'; 1279 else if (!map) 1280 level = 'X'; 1281 } 1282 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, 1283 width); 1284 } 1285 1286 struct sort_entry sort_mispredict = { 1287 .se_header = "Branch Mispredicted", 1288 .se_cmp = sort__mispredict_cmp, 1289 .se_snprintf = hist_entry__mispredict_snprintf, 1290 .se_width_idx = HISTC_MISPREDICT, 1291 }; 1292 1293 static u64 he_weight(struct hist_entry *he) 1294 { 1295 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; 1296 } 1297 1298 static int64_t 1299 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1300 { 1301 return he_weight(left) - he_weight(right); 1302 } 1303 1304 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1305 size_t size, unsigned int width) 1306 { 1307 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1308 } 1309 1310 struct sort_entry sort_local_weight = { 1311 .se_header = "Local Weight", 1312 .se_cmp = sort__local_weight_cmp, 1313 .se_snprintf = hist_entry__local_weight_snprintf, 1314 .se_width_idx = HISTC_LOCAL_WEIGHT, 1315 }; 1316 1317 static int64_t 1318 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1319 { 1320 return left->stat.weight - right->stat.weight; 1321 } 1322 1323 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1324 size_t size, unsigned int width) 1325 { 1326 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1327 } 1328 1329 struct sort_entry sort_global_weight = { 1330 .se_header = "Weight", 1331 .se_cmp = sort__global_weight_cmp, 1332 .se_snprintf = hist_entry__global_weight_snprintf, 1333 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1334 }; 1335 1336 struct sort_entry sort_mem_daddr_sym = { 1337 .se_header = "Data Symbol", 1338 .se_cmp = sort__daddr_cmp, 1339 .se_snprintf = hist_entry__daddr_snprintf, 1340 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1341 }; 1342 1343 struct sort_entry sort_mem_iaddr_sym = { 1344 .se_header = "Code Symbol", 1345 .se_cmp = sort__iaddr_cmp, 1346 .se_snprintf = hist_entry__iaddr_snprintf, 1347 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1348 }; 1349 1350 struct sort_entry sort_mem_daddr_dso = { 1351 .se_header = "Data Object", 1352 .se_cmp = 
sort__dso_daddr_cmp, 1353 .se_snprintf = hist_entry__dso_daddr_snprintf, 1354 .se_width_idx = HISTC_MEM_DADDR_DSO, 1355 }; 1356 1357 struct sort_entry sort_mem_locked = { 1358 .se_header = "Locked", 1359 .se_cmp = sort__locked_cmp, 1360 .se_snprintf = hist_entry__locked_snprintf, 1361 .se_width_idx = HISTC_MEM_LOCKED, 1362 }; 1363 1364 struct sort_entry sort_mem_tlb = { 1365 .se_header = "TLB access", 1366 .se_cmp = sort__tlb_cmp, 1367 .se_snprintf = hist_entry__tlb_snprintf, 1368 .se_width_idx = HISTC_MEM_TLB, 1369 }; 1370 1371 struct sort_entry sort_mem_lvl = { 1372 .se_header = "Memory access", 1373 .se_cmp = sort__lvl_cmp, 1374 .se_snprintf = hist_entry__lvl_snprintf, 1375 .se_width_idx = HISTC_MEM_LVL, 1376 }; 1377 1378 struct sort_entry sort_mem_snoop = { 1379 .se_header = "Snoop", 1380 .se_cmp = sort__snoop_cmp, 1381 .se_snprintf = hist_entry__snoop_snprintf, 1382 .se_width_idx = HISTC_MEM_SNOOP, 1383 }; 1384 1385 struct sort_entry sort_mem_dcacheline = { 1386 .se_header = "Data Cacheline", 1387 .se_cmp = sort__dcacheline_cmp, 1388 .se_snprintf = hist_entry__dcacheline_snprintf, 1389 .se_width_idx = HISTC_MEM_DCACHELINE, 1390 }; 1391 1392 static int64_t 1393 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1394 { 1395 uint64_t l = 0, r = 0; 1396 1397 if (left->mem_info) 1398 l = left->mem_info->daddr.phys_addr; 1399 if (right->mem_info) 1400 r = right->mem_info->daddr.phys_addr; 1401 1402 return (int64_t)(r - l); 1403 } 1404 1405 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1406 size_t size, unsigned int width) 1407 { 1408 uint64_t addr = 0; 1409 size_t ret = 0; 1410 size_t len = BITS_PER_LONG / 4; 1411 1412 addr = he->mem_info->daddr.phys_addr; 1413 1414 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1415 1416 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1417 1418 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1419 1420 if (ret > width) 1421 bf[width] = '\0'; 1422 1423 return width; 1424 } 1425 1426 struct sort_entry sort_mem_phys_daddr = { 1427 .se_header = "Data Physical Address", 1428 .se_cmp = sort__phys_daddr_cmp, 1429 .se_snprintf = hist_entry__phys_daddr_snprintf, 1430 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1431 }; 1432 1433 static int64_t 1434 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1435 { 1436 if (!left->branch_info || !right->branch_info) 1437 return cmp_null(left->branch_info, right->branch_info); 1438 1439 return left->branch_info->flags.abort != 1440 right->branch_info->flags.abort; 1441 } 1442 1443 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1444 size_t size, unsigned int width) 1445 { 1446 static const char *out = "N/A"; 1447 1448 if (he->branch_info) { 1449 if (he->branch_info->flags.abort) 1450 out = "A"; 1451 else 1452 out = "."; 1453 } 1454 1455 return repsep_snprintf(bf, size, "%-*s", width, out); 1456 } 1457 1458 struct sort_entry sort_abort = { 1459 .se_header = "Transaction abort", 1460 .se_cmp = sort__abort_cmp, 1461 .se_snprintf = hist_entry__abort_snprintf, 1462 .se_width_idx = HISTC_ABORT, 1463 }; 1464 1465 static int64_t 1466 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1467 { 1468 if (!left->branch_info || !right->branch_info) 1469 return cmp_null(left->branch_info, right->branch_info); 1470 1471 return left->branch_info->flags.in_tx != 1472 right->branch_info->flags.in_tx; 1473 } 1474 1475 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 
1476 size_t size, unsigned int width) 1477 { 1478 static const char *out = "N/A"; 1479 1480 if (he->branch_info) { 1481 if (he->branch_info->flags.in_tx) 1482 out = "T"; 1483 else 1484 out = "."; 1485 } 1486 1487 return repsep_snprintf(bf, size, "%-*s", width, out); 1488 } 1489 1490 struct sort_entry sort_in_tx = { 1491 .se_header = "Branch in transaction", 1492 .se_cmp = sort__in_tx_cmp, 1493 .se_snprintf = hist_entry__in_tx_snprintf, 1494 .se_width_idx = HISTC_IN_TX, 1495 }; 1496 1497 static int64_t 1498 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1499 { 1500 return left->transaction - right->transaction; 1501 } 1502 1503 static inline char *add_str(char *p, const char *str) 1504 { 1505 strcpy(p, str); 1506 return p + strlen(str); 1507 } 1508 1509 static struct txbit { 1510 unsigned flag; 1511 const char *name; 1512 int skip_for_len; 1513 } txbits[] = { 1514 { PERF_TXN_ELISION, "EL ", 0 }, 1515 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1516 { PERF_TXN_SYNC, "SYNC ", 1 }, 1517 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1518 { PERF_TXN_RETRY, "RETRY ", 0 }, 1519 { PERF_TXN_CONFLICT, "CON ", 0 }, 1520 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1521 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1522 { 0, NULL, 0 } 1523 }; 1524 1525 int hist_entry__transaction_len(void) 1526 { 1527 int i; 1528 int len = 0; 1529 1530 for (i = 0; txbits[i].name; i++) { 1531 if (!txbits[i].skip_for_len) 1532 len += strlen(txbits[i].name); 1533 } 1534 len += 4; /* :XX<space> */ 1535 return len; 1536 } 1537 1538 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1539 size_t size, unsigned int width) 1540 { 1541 u64 t = he->transaction; 1542 char buf[128]; 1543 char *p = buf; 1544 int i; 1545 1546 buf[0] = 0; 1547 for (i = 0; txbits[i].name; i++) 1548 if (txbits[i].flag & t) 1549 p = add_str(p, txbits[i].name); 1550 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1551 p = add_str(p, "NEITHER "); 1552 if (t & PERF_TXN_ABORT_MASK) { 1553 sprintf(p, ":%" PRIx64, 1554 (t & PERF_TXN_ABORT_MASK) >> 1555 PERF_TXN_ABORT_SHIFT); 1556 p += strlen(p); 1557 } 1558 1559 return repsep_snprintf(bf, size, "%-*s", width, buf); 1560 } 1561 1562 struct sort_entry sort_transaction = { 1563 .se_header = "Transaction ", 1564 .se_cmp = sort__transaction_cmp, 1565 .se_snprintf = hist_entry__transaction_snprintf, 1566 .se_width_idx = HISTC_TRANSACTION, 1567 }; 1568 1569 /* --sort symbol_size */ 1570 1571 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1572 { 1573 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1574 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 1575 1576 return size_l < size_r ? -1 : 1577 size_l == size_r ? 
0 : 1; 1578 } 1579 1580 static int64_t 1581 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1582 { 1583 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1584 } 1585 1586 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1587 size_t bf_size, unsigned int width) 1588 { 1589 if (sym) 1590 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1591 1592 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1593 } 1594 1595 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1596 size_t size, unsigned int width) 1597 { 1598 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1599 } 1600 1601 struct sort_entry sort_sym_size = { 1602 .se_header = "Symbol size", 1603 .se_cmp = sort__sym_size_cmp, 1604 .se_snprintf = hist_entry__sym_size_snprintf, 1605 .se_width_idx = HISTC_SYM_SIZE, 1606 }; 1607 1608 /* --sort dso_size */ 1609 1610 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 1611 { 1612 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 1613 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 1614 1615 return size_l < size_r ? -1 : 1616 size_l == size_r ? 0 : 1; 1617 } 1618 1619 static int64_t 1620 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 1621 { 1622 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 1623 } 1624 1625 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 1626 size_t bf_size, unsigned int width) 1627 { 1628 if (map && map->dso) 1629 return repsep_snprintf(bf, bf_size, "%*d", width, 1630 map__size(map)); 1631 1632 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1633 } 1634 1635 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 1636 size_t size, unsigned int width) 1637 { 1638 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 1639 } 1640 1641 struct sort_entry sort_dso_size = { 1642 .se_header = "DSO size", 1643 .se_cmp = sort__dso_size_cmp, 1644 .se_snprintf = hist_entry__dso_size_snprintf, 1645 .se_width_idx = HISTC_DSO_SIZE, 1646 }; 1647 1648 1649 struct sort_dimension { 1650 const char *name; 1651 struct sort_entry *entry; 1652 int taken; 1653 }; 1654 1655 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1656 1657 static struct sort_dimension common_sort_dimensions[] = { 1658 DIM(SORT_PID, "pid", sort_thread), 1659 DIM(SORT_COMM, "comm", sort_comm), 1660 DIM(SORT_DSO, "dso", sort_dso), 1661 DIM(SORT_SYM, "symbol", sort_sym), 1662 DIM(SORT_PARENT, "parent", sort_parent), 1663 DIM(SORT_CPU, "cpu", sort_cpu), 1664 DIM(SORT_SOCKET, "socket", sort_socket), 1665 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1666 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1667 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1668 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1669 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1670 DIM(SORT_TRACE, "trace", sort_trace), 1671 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1672 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1673 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1674 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 1675 DIM(SORT_TIME, "time", sort_time), 1676 }; 1677 1678 #undef DIM 1679 1680 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1681 1682 static struct sort_dimension bstack_sort_dimensions[] = { 1683 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1684 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1685 
DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1686 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1687 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1688 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1689 DIM(SORT_ABORT, "abort", sort_abort), 1690 DIM(SORT_CYCLES, "cycles", sort_cycles), 1691 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1692 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1693 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 1694 }; 1695 1696 #undef DIM 1697 1698 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1699 1700 static struct sort_dimension memory_sort_dimensions[] = { 1701 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1702 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1703 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1704 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1705 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1706 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1707 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1708 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1709 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 1710 }; 1711 1712 #undef DIM 1713 1714 struct hpp_dimension { 1715 const char *name; 1716 struct perf_hpp_fmt *fmt; 1717 int taken; 1718 }; 1719 1720 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1721 1722 static struct hpp_dimension hpp_sort_dimensions[] = { 1723 DIM(PERF_HPP__OVERHEAD, "overhead"), 1724 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1725 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1726 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1727 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1728 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1729 DIM(PERF_HPP__SAMPLES, "sample"), 1730 DIM(PERF_HPP__PERIOD, "period"), 1731 }; 1732 1733 #undef DIM 1734 1735 struct hpp_sort_entry { 1736 struct perf_hpp_fmt hpp; 1737 struct sort_entry *se; 1738 }; 1739 1740 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1741 { 1742 struct hpp_sort_entry *hse; 1743 1744 if (!perf_hpp__is_sort_entry(fmt)) 1745 return; 1746 1747 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1748 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1749 } 1750 1751 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1752 struct hists *hists, int line __maybe_unused, 1753 int *span __maybe_unused) 1754 { 1755 struct hpp_sort_entry *hse; 1756 size_t len = fmt->user_len; 1757 1758 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1759 1760 if (!len) 1761 len = hists__col_len(hists, hse->se->se_width_idx); 1762 1763 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1764 } 1765 1766 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1767 struct perf_hpp *hpp __maybe_unused, 1768 struct hists *hists) 1769 { 1770 struct hpp_sort_entry *hse; 1771 size_t len = fmt->user_len; 1772 1773 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1774 1775 if (!len) 1776 len = hists__col_len(hists, hse->se->se_width_idx); 1777 1778 return len; 1779 } 1780 1781 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1782 struct hist_entry *he) 1783 { 1784 struct hpp_sort_entry *hse; 1785 size_t len = fmt->user_len; 1786 1787 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1788 1789 if (!len) 1790 len = hists__col_len(he->hists, hse->se->se_width_idx); 1791 1792 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 
1793 } 1794 1795 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1796 struct hist_entry *a, struct hist_entry *b) 1797 { 1798 struct hpp_sort_entry *hse; 1799 1800 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1801 return hse->se->se_cmp(a, b); 1802 } 1803 1804 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1805 struct hist_entry *a, struct hist_entry *b) 1806 { 1807 struct hpp_sort_entry *hse; 1808 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1809 1810 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1811 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1812 return collapse_fn(a, b); 1813 } 1814 1815 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1816 struct hist_entry *a, struct hist_entry *b) 1817 { 1818 struct hpp_sort_entry *hse; 1819 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1820 1821 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1822 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1823 return sort_fn(a, b); 1824 } 1825 1826 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1827 { 1828 return format->header == __sort__hpp_header; 1829 } 1830 1831 #define MK_SORT_ENTRY_CHK(key) \ 1832 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1833 { \ 1834 struct hpp_sort_entry *hse; \ 1835 \ 1836 if (!perf_hpp__is_sort_entry(fmt)) \ 1837 return false; \ 1838 \ 1839 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1840 return hse->se == &sort_ ## key ; \ 1841 } 1842 1843 MK_SORT_ENTRY_CHK(trace) 1844 MK_SORT_ENTRY_CHK(srcline) 1845 MK_SORT_ENTRY_CHK(srcfile) 1846 MK_SORT_ENTRY_CHK(thread) 1847 MK_SORT_ENTRY_CHK(comm) 1848 MK_SORT_ENTRY_CHK(dso) 1849 MK_SORT_ENTRY_CHK(sym) 1850 1851 1852 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1853 { 1854 struct hpp_sort_entry *hse_a; 1855 struct hpp_sort_entry *hse_b; 1856 1857 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1858 return false; 1859 1860 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1861 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1862 1863 return hse_a->se == hse_b->se; 1864 } 1865 1866 static void hse_free(struct perf_hpp_fmt *fmt) 1867 { 1868 struct hpp_sort_entry *hse; 1869 1870 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1871 free(hse); 1872 } 1873 1874 static struct hpp_sort_entry * 1875 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1876 { 1877 struct hpp_sort_entry *hse; 1878 1879 hse = malloc(sizeof(*hse)); 1880 if (hse == NULL) { 1881 pr_err("Memory allocation failed\n"); 1882 return NULL; 1883 } 1884 1885 hse->se = sd->entry; 1886 hse->hpp.name = sd->entry->se_header; 1887 hse->hpp.header = __sort__hpp_header; 1888 hse->hpp.width = __sort__hpp_width; 1889 hse->hpp.entry = __sort__hpp_entry; 1890 hse->hpp.color = NULL; 1891 1892 hse->hpp.cmp = __sort__hpp_cmp; 1893 hse->hpp.collapse = __sort__hpp_collapse; 1894 hse->hpp.sort = __sort__hpp_sort; 1895 hse->hpp.equal = __sort__hpp_equal; 1896 hse->hpp.free = hse_free; 1897 1898 INIT_LIST_HEAD(&hse->hpp.list); 1899 INIT_LIST_HEAD(&hse->hpp.sort_list); 1900 hse->hpp.elide = false; 1901 hse->hpp.len = 0; 1902 hse->hpp.user_len = 0; 1903 hse->hpp.level = level; 1904 1905 return hse; 1906 } 1907 1908 static void hpp_free(struct perf_hpp_fmt *fmt) 1909 { 1910 free(fmt); 1911 } 1912 1913 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1914 int level) 1915 { 1916 struct perf_hpp_fmt *fmt; 1917 1918 fmt = memdup(hd->fmt, sizeof(*fmt)); 1919 if 
(fmt) { 1920 INIT_LIST_HEAD(&fmt->list); 1921 INIT_LIST_HEAD(&fmt->sort_list); 1922 fmt->free = hpp_free; 1923 fmt->level = level; 1924 } 1925 1926 return fmt; 1927 } 1928 1929 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1930 { 1931 struct perf_hpp_fmt *fmt; 1932 struct hpp_sort_entry *hse; 1933 int ret = -1; 1934 int r; 1935 1936 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1937 if (!perf_hpp__is_sort_entry(fmt)) 1938 continue; 1939 1940 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1941 if (hse->se->se_filter == NULL) 1942 continue; 1943 1944 /* 1945 * hist entry is filtered if any of sort key in the hpp list 1946 * is applied. But it should skip non-matched filter types. 1947 */ 1948 r = hse->se->se_filter(he, type, arg); 1949 if (r >= 0) { 1950 if (ret < 0) 1951 ret = 0; 1952 ret |= r; 1953 } 1954 } 1955 1956 return ret; 1957 } 1958 1959 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1960 struct perf_hpp_list *list, 1961 int level) 1962 { 1963 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1964 1965 if (hse == NULL) 1966 return -1; 1967 1968 perf_hpp_list__register_sort_field(list, &hse->hpp); 1969 return 0; 1970 } 1971 1972 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1973 struct perf_hpp_list *list) 1974 { 1975 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1976 1977 if (hse == NULL) 1978 return -1; 1979 1980 perf_hpp_list__column_register(list, &hse->hpp); 1981 return 0; 1982 } 1983 1984 struct hpp_dynamic_entry { 1985 struct perf_hpp_fmt hpp; 1986 struct evsel *evsel; 1987 struct tep_format_field *field; 1988 unsigned dynamic_len; 1989 bool raw_trace; 1990 }; 1991 1992 static int hde_width(struct hpp_dynamic_entry *hde) 1993 { 1994 if (!hde->hpp.len) { 1995 int len = hde->dynamic_len; 1996 int namelen = strlen(hde->field->name); 1997 int fieldlen = hde->field->size; 1998 1999 if (namelen > len) 2000 len = namelen; 2001 2002 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2003 /* length for print hex numbers */ 2004 fieldlen = hde->field->size * 2 + 2; 2005 } 2006 if (fieldlen > len) 2007 len = fieldlen; 2008 2009 hde->hpp.len = len; 2010 } 2011 return hde->hpp.len; 2012 } 2013 2014 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2015 struct hist_entry *he) 2016 { 2017 char *str, *pos; 2018 struct tep_format_field *field = hde->field; 2019 size_t namelen; 2020 bool last = false; 2021 2022 if (hde->raw_trace) 2023 return; 2024 2025 /* parse pretty print result and update max length */ 2026 if (!he->trace_output) 2027 he->trace_output = get_trace_output(he); 2028 2029 namelen = strlen(field->name); 2030 str = he->trace_output; 2031 2032 while (str) { 2033 pos = strchr(str, ' '); 2034 if (pos == NULL) { 2035 last = true; 2036 pos = str + strlen(str); 2037 } 2038 2039 if (!strncmp(str, field->name, namelen)) { 2040 size_t len; 2041 2042 str += namelen + 1; 2043 len = pos - str; 2044 2045 if (len > hde->dynamic_len) 2046 hde->dynamic_len = len; 2047 break; 2048 } 2049 2050 if (last) 2051 str = NULL; 2052 else 2053 str = pos + 1; 2054 } 2055 } 2056 2057 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2058 struct hists *hists __maybe_unused, 2059 int line __maybe_unused, 2060 int *span __maybe_unused) 2061 { 2062 struct hpp_dynamic_entry *hde; 2063 size_t len = fmt->user_len; 2064 2065 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2066 2067 if (!len) 2068 len = hde_width(hde); 2069 2070 return 
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

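/*
 * Compare two entries by the raw value of the tracepoint field.  For
 * dynamic (variable length) fields the word stored in the record carries
 * the data offset in its low 16 bits and the length in its high 16 bits,
 * so decode that first; fixed-size fields are compared at their static
 * offset.  A NULL 'b' is only a hook to update the maximum output width
 * for 'a'.
 */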
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	if (new_fmt) {
		INIT_LIST_HEAD(&new_fmt->list);
		INIT_LIST_HEAD(&new_fmt->sort_list);
	}

	return new_fmt;
}

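/*
 * Split a dynamic sort token of the form "[<event>.]<field>[/<opt>]" in
 * place.  Missing parts come back as NULL.  For illustration only:
 * "sched:sched_switch.prev_pid/raw" yields event "sched:sched_switch",
 * field "prev_pid" and option "raw", while "prev_pid" alone yields a NULL
 * event so every tracepoint event is searched for that field.
 */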
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* Find the matching evsel for a given event name. The event name can be:
 * 1. '%' + event index (e.g. '%1' for the first event)
 * 2. full event name (e.g. sched:sched_switch)
 * 3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->core.nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

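/*
 * Add one or more dynamic sort keys for a --sort token (the examples
 * below are illustrative).  Accepted forms:
 *
 *   trace_fields          - all fields of all tracepoint events
 *   <field>               - the field in any tracepoint event that has it
 *   <event>.<field>       - one field of one event
 *   <event>.*             - all fields of one event
 *
 * Appending "/raw" to the token prints the field without the event's
 * pretty-print formatting.
 */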
static int add_dynamic_entry(struct evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct evsel *evsel;
	struct tep_format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

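/*
 * Resolve one --sort token.  The dimension tables are searched in order:
 * common sort keys, hpp (overhead/period style) columns, branch stack
 * keys (branch mode only), memory keys (memory mode only), and finally
 * tracepoint fields via add_dynamic_entry().  Matching is by
 * case-insensitive prefix, so e.g. "sym" selects "symbol" (illustrative).
 */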
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference between
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

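/*
 * Split the --sort string into tokens.  Keys separated by ',' or ' ' get
 * increasing hierarchy levels, while keys grouped inside '{}' share one
 * level; for instance (illustration only) "comm,{dso,sym}" puts dso and
 * sym on the same level below comm in hierarchy mode.
 */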
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

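/*
 * Build the final sort key list: honor a '+'-prefixed --sort by appending
 * it to the default order, fall back to the mode's default order when no
 * sort order was given (or do nothing when only a strict --fields order
 * was requested), prepend the overhead keys unless a strict --fields
 * order was given, and then parse the resulting string.
 */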
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

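/*
 * Hide columns whose value is fully determined by a single-entry filter
 * list (e.g. a lone --dsos argument makes the dso column redundant -
 * illustrative).  If that would elide every sort column, undo it so the
 * output still shows something.
 */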
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

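/*
 * Top-level entry point: set up the sort keys (plus the implicit "parent"
 * key when a --parent pattern was given), initialize the standard output
 * columns unless running perf diff, parse --fields, and then cross-copy
 * sort keys and output fields so both lists end up consistent before the
 * hists-specific formats are created.
 */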
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

const char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}