1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <linux/mman.h> 6 #include <linux/time64.h> 7 #include "sort.h" 8 #include "hist.h" 9 #include "cacheline.h" 10 #include "comm.h" 11 #include "map.h" 12 #include "symbol.h" 13 #include "thread.h" 14 #include "evsel.h" 15 #include "evlist.h" 16 #include "srcline.h" 17 #include "strlist.h" 18 #include "strbuf.h" 19 #include <traceevent/event-parse.h> 20 #include "mem-events.h" 21 #include "annotate.h" 22 #include "time-utils.h" 23 #include <linux/kernel.h> 24 25 regex_t parent_regex; 26 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 27 const char *parent_pattern = default_parent_pattern; 28 const char *default_sort_order = "comm,dso,symbol"; 29 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 30 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; 31 const char default_top_sort_order[] = "dso,symbol"; 32 const char default_diff_sort_order[] = "dso,symbol"; 33 const char default_tracepoint_sort_order[] = "trace"; 34 const char *sort_order; 35 const char *field_order; 36 regex_t ignore_callees_regex; 37 int have_ignore_callees = 0; 38 enum sort_mode sort__mode = SORT_MODE__NORMAL; 39 40 /* 41 * Replaces all occurrences of a char used with the: 42 * 43 * -t, --field-separator 44 * 45 * option, that uses a special separator character and don't pad with spaces, 46 * replacing all occurrences of this separator in symbol names (and other 47 * output) with a '.' character, that thus it's the only non valid separator. 48 */ 49 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 
50 { 51 int n; 52 va_list ap; 53 54 va_start(ap, fmt); 55 n = vsnprintf(bf, size, fmt, ap); 56 if (symbol_conf.field_sep && n > 0) { 57 char *sep = bf; 58 59 while (1) { 60 sep = strchr(sep, *symbol_conf.field_sep); 61 if (sep == NULL) 62 break; 63 *sep = '.'; 64 } 65 } 66 va_end(ap); 67 68 if (n >= (int)size) 69 return size - 1; 70 return n; 71 } 72 73 static int64_t cmp_null(const void *l, const void *r) 74 { 75 if (!l && !r) 76 return 0; 77 else if (!l) 78 return -1; 79 else 80 return 1; 81 } 82 83 /* --sort pid */ 84 85 static int64_t 86 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 87 { 88 return right->thread->tid - left->thread->tid; 89 } 90 91 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 92 size_t size, unsigned int width) 93 { 94 const char *comm = thread__comm_str(he->thread); 95 96 width = max(7U, width) - 8; 97 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 98 width, width, comm ?: ""); 99 } 100 101 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 102 { 103 const struct thread *th = arg; 104 105 if (type != HIST_FILTER__THREAD) 106 return -1; 107 108 return th && he->thread != th; 109 } 110 111 struct sort_entry sort_thread = { 112 .se_header = " Pid:Command", 113 .se_cmp = sort__thread_cmp, 114 .se_snprintf = hist_entry__thread_snprintf, 115 .se_filter = hist_entry__thread_filter, 116 .se_width_idx = HISTC_THREAD, 117 }; 118 119 /* --sort comm */ 120 121 /* 122 * We can't use pointer comparison in functions below, 123 * because it gives different results based on pointer 124 * values, which could break some sorting assumptions. 
125 */ 126 static int64_t 127 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 128 { 129 return strcmp(comm__str(right->comm), comm__str(left->comm)); 130 } 131 132 static int64_t 133 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 134 { 135 return strcmp(comm__str(right->comm), comm__str(left->comm)); 136 } 137 138 static int64_t 139 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 140 { 141 return strcmp(comm__str(right->comm), comm__str(left->comm)); 142 } 143 144 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 145 size_t size, unsigned int width) 146 { 147 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 148 } 149 150 struct sort_entry sort_comm = { 151 .se_header = "Command", 152 .se_cmp = sort__comm_cmp, 153 .se_collapse = sort__comm_collapse, 154 .se_sort = sort__comm_sort, 155 .se_snprintf = hist_entry__comm_snprintf, 156 .se_filter = hist_entry__thread_filter, 157 .se_width_idx = HISTC_COMM, 158 }; 159 160 /* --sort dso */ 161 162 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 163 { 164 struct dso *dso_l = map_l ? map_l->dso : NULL; 165 struct dso *dso_r = map_r ? map_r->dso : NULL; 166 const char *dso_name_l, *dso_name_r; 167 168 if (!dso_l || !dso_r) 169 return cmp_null(dso_r, dso_l); 170 171 if (verbose > 0) { 172 dso_name_l = dso_l->long_name; 173 dso_name_r = dso_r->long_name; 174 } else { 175 dso_name_l = dso_l->short_name; 176 dso_name_r = dso_r->short_name; 177 } 178 179 return strcmp(dso_name_l, dso_name_r); 180 } 181 182 static int64_t 183 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 184 { 185 return _sort__dso_cmp(right->ms.map, left->ms.map); 186 } 187 188 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 189 size_t size, unsigned int width) 190 { 191 if (map && map->dso) { 192 const char *dso_name = verbose > 0 ? 
map->dso->long_name : 193 map->dso->short_name; 194 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 195 } 196 197 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 198 } 199 200 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 201 size_t size, unsigned int width) 202 { 203 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 204 } 205 206 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 207 { 208 const struct dso *dso = arg; 209 210 if (type != HIST_FILTER__DSO) 211 return -1; 212 213 return dso && (!he->ms.map || he->ms.map->dso != dso); 214 } 215 216 struct sort_entry sort_dso = { 217 .se_header = "Shared Object", 218 .se_cmp = sort__dso_cmp, 219 .se_snprintf = hist_entry__dso_snprintf, 220 .se_filter = hist_entry__dso_filter, 221 .se_width_idx = HISTC_DSO, 222 }; 223 224 /* --sort symbol */ 225 226 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 227 { 228 return (int64_t)(right_ip - left_ip); 229 } 230 231 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 232 { 233 if (!sym_l || !sym_r) 234 return cmp_null(sym_l, sym_r); 235 236 if (sym_l == sym_r) 237 return 0; 238 239 if (sym_l->inlined || sym_r->inlined) { 240 int ret = strcmp(sym_l->name, sym_r->name); 241 242 if (ret) 243 return ret; 244 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) 245 return 0; 246 } 247 248 if (sym_l->start != sym_r->start) 249 return (int64_t)(sym_r->start - sym_l->start); 250 251 return (int64_t)(sym_r->end - sym_l->end); 252 } 253 254 static int64_t 255 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 256 { 257 int64_t ret; 258 259 if (!left->ms.sym && !right->ms.sym) 260 return _sort__addr_cmp(left->ip, right->ip); 261 262 /* 263 * comparing symbol address alone is not enough since it's a 264 * relative address within a dso. 
265 */ 266 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 267 ret = sort__dso_cmp(left, right); 268 if (ret != 0) 269 return ret; 270 } 271 272 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 273 } 274 275 static int64_t 276 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 277 { 278 if (!left->ms.sym || !right->ms.sym) 279 return cmp_null(left->ms.sym, right->ms.sym); 280 281 return strcmp(right->ms.sym->name, left->ms.sym->name); 282 } 283 284 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 285 u64 ip, char level, char *bf, size_t size, 286 unsigned int width) 287 { 288 size_t ret = 0; 289 290 if (verbose > 0) { 291 char o = map ? dso__symtab_origin(map->dso) : '!'; 292 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 293 BITS_PER_LONG / 4 + 2, ip, o); 294 } 295 296 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 297 if (sym && map) { 298 if (sym->type == STT_OBJECT) { 299 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 300 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 301 ip - map->unmap_ip(map, sym->start)); 302 } else { 303 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 304 width - ret, 305 sym->name); 306 if (sym->inlined) 307 ret += repsep_snprintf(bf + ret, size - ret, 308 " (inlined)"); 309 } 310 } else { 311 size_t len = BITS_PER_LONG / 4; 312 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 313 len, ip); 314 } 315 316 return ret; 317 } 318 319 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 320 size_t size, unsigned int width) 321 { 322 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 323 he->level, bf, size, width); 324 } 325 326 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 327 { 328 const char *sym = arg; 329 330 if (type != HIST_FILTER__SYMBOL) 331 return -1; 332 333 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 334 } 335 336 struct 
sort_entry sort_sym = { 337 .se_header = "Symbol", 338 .se_cmp = sort__sym_cmp, 339 .se_sort = sort__sym_sort, 340 .se_snprintf = hist_entry__sym_snprintf, 341 .se_filter = hist_entry__sym_filter, 342 .se_width_idx = HISTC_SYMBOL, 343 }; 344 345 /* --sort srcline */ 346 347 char *hist_entry__srcline(struct hist_entry *he) 348 { 349 return map__srcline(he->ms.map, he->ip, he->ms.sym); 350 } 351 352 static int64_t 353 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 354 { 355 if (!left->srcline) 356 left->srcline = hist_entry__srcline(left); 357 if (!right->srcline) 358 right->srcline = hist_entry__srcline(right); 359 360 return strcmp(right->srcline, left->srcline); 361 } 362 363 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 364 size_t size, unsigned int width) 365 { 366 if (!he->srcline) 367 he->srcline = hist_entry__srcline(he); 368 369 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 370 } 371 372 struct sort_entry sort_srcline = { 373 .se_header = "Source:Line", 374 .se_cmp = sort__srcline_cmp, 375 .se_snprintf = hist_entry__srcline_snprintf, 376 .se_width_idx = HISTC_SRCLINE, 377 }; 378 379 /* --sort srcline_from */ 380 381 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 382 { 383 return map__srcline(ams->map, ams->al_addr, ams->sym); 384 } 385 386 static int64_t 387 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 388 { 389 if (!left->branch_info->srcline_from) 390 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 391 392 if (!right->branch_info->srcline_from) 393 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 394 395 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 396 } 397 398 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 399 size_t size, unsigned int width) 400 { 401 return repsep_snprintf(bf, size, "%-*.*s", 
width, width, he->branch_info->srcline_from); 402 } 403 404 struct sort_entry sort_srcline_from = { 405 .se_header = "From Source:Line", 406 .se_cmp = sort__srcline_from_cmp, 407 .se_snprintf = hist_entry__srcline_from_snprintf, 408 .se_width_idx = HISTC_SRCLINE_FROM, 409 }; 410 411 /* --sort srcline_to */ 412 413 static int64_t 414 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 415 { 416 if (!left->branch_info->srcline_to) 417 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 418 419 if (!right->branch_info->srcline_to) 420 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 421 422 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 423 } 424 425 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 426 size_t size, unsigned int width) 427 { 428 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 429 } 430 431 struct sort_entry sort_srcline_to = { 432 .se_header = "To Source:Line", 433 .se_cmp = sort__srcline_to_cmp, 434 .se_snprintf = hist_entry__srcline_to_snprintf, 435 .se_width_idx = HISTC_SRCLINE_TO, 436 }; 437 438 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 439 size_t size, unsigned int width) 440 { 441 442 struct symbol *sym = he->ms.sym; 443 struct annotation *notes; 444 double ipc = 0.0, coverage = 0.0; 445 char tmp[64]; 446 447 if (!sym) 448 return repsep_snprintf(bf, size, "%-*s", width, "-"); 449 450 notes = symbol__annotation(sym); 451 452 if (notes->hit_cycles) 453 ipc = notes->hit_insn / ((double)notes->hit_cycles); 454 455 if (notes->total_insn) { 456 coverage = notes->cover_insn * 100.0 / 457 ((double)notes->total_insn); 458 } 459 460 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 461 return repsep_snprintf(bf, size, "%-*s", width, tmp); 462 } 463 464 struct sort_entry sort_sym_ipc = { 465 .se_header = "IPC [IPC Coverage]", 466 
.se_cmp = sort__sym_cmp, 467 .se_snprintf = hist_entry__sym_ipc_snprintf, 468 .se_width_idx = HISTC_SYMBOL_IPC, 469 }; 470 471 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 472 __maybe_unused, 473 char *bf, size_t size, 474 unsigned int width) 475 { 476 char tmp[64]; 477 478 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 479 return repsep_snprintf(bf, size, "%-*s", width, tmp); 480 } 481 482 struct sort_entry sort_sym_ipc_null = { 483 .se_header = "IPC [IPC Coverage]", 484 .se_cmp = sort__sym_cmp, 485 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 486 .se_width_idx = HISTC_SYMBOL_IPC, 487 }; 488 489 /* --sort srcfile */ 490 491 static char no_srcfile[1]; 492 493 static char *hist_entry__get_srcfile(struct hist_entry *e) 494 { 495 char *sf, *p; 496 struct map *map = e->ms.map; 497 498 if (!map) 499 return no_srcfile; 500 501 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 502 e->ms.sym, false, true, true, e->ip); 503 if (!strcmp(sf, SRCLINE_UNKNOWN)) 504 return no_srcfile; 505 p = strchr(sf, ':'); 506 if (p && *sf) { 507 *p = 0; 508 return sf; 509 } 510 free(sf); 511 return no_srcfile; 512 } 513 514 static int64_t 515 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 516 { 517 if (!left->srcfile) 518 left->srcfile = hist_entry__get_srcfile(left); 519 if (!right->srcfile) 520 right->srcfile = hist_entry__get_srcfile(right); 521 522 return strcmp(right->srcfile, left->srcfile); 523 } 524 525 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 526 size_t size, unsigned int width) 527 { 528 if (!he->srcfile) 529 he->srcfile = hist_entry__get_srcfile(he); 530 531 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 532 } 533 534 struct sort_entry sort_srcfile = { 535 .se_header = "Source File", 536 .se_cmp = sort__srcfile_cmp, 537 .se_snprintf = hist_entry__srcfile_snprintf, 538 .se_width_idx = HISTC_SRCFILE, 539 }; 540 541 /* --sort parent */ 542 543 static int64_t 544 
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 545 { 546 struct symbol *sym_l = left->parent; 547 struct symbol *sym_r = right->parent; 548 549 if (!sym_l || !sym_r) 550 return cmp_null(sym_l, sym_r); 551 552 return strcmp(sym_r->name, sym_l->name); 553 } 554 555 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 556 size_t size, unsigned int width) 557 { 558 return repsep_snprintf(bf, size, "%-*.*s", width, width, 559 he->parent ? he->parent->name : "[other]"); 560 } 561 562 struct sort_entry sort_parent = { 563 .se_header = "Parent symbol", 564 .se_cmp = sort__parent_cmp, 565 .se_snprintf = hist_entry__parent_snprintf, 566 .se_width_idx = HISTC_PARENT, 567 }; 568 569 /* --sort cpu */ 570 571 static int64_t 572 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 573 { 574 return right->cpu - left->cpu; 575 } 576 577 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 578 size_t size, unsigned int width) 579 { 580 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 581 } 582 583 struct sort_entry sort_cpu = { 584 .se_header = "CPU", 585 .se_cmp = sort__cpu_cmp, 586 .se_snprintf = hist_entry__cpu_snprintf, 587 .se_width_idx = HISTC_CPU, 588 }; 589 590 /* --sort cgroup_id */ 591 592 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 593 { 594 return (int64_t)(right_dev - left_dev); 595 } 596 597 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 598 { 599 return (int64_t)(right_ino - left_ino); 600 } 601 602 static int64_t 603 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 604 { 605 int64_t ret; 606 607 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 608 if (ret != 0) 609 return ret; 610 611 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 612 left->cgroup_id.ino); 613 } 614 615 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 616 char *bf, size_t size, 617 unsigned int width 
__maybe_unused) 618 { 619 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 620 he->cgroup_id.ino); 621 } 622 623 struct sort_entry sort_cgroup_id = { 624 .se_header = "cgroup id (dev/inode)", 625 .se_cmp = sort__cgroup_id_cmp, 626 .se_snprintf = hist_entry__cgroup_id_snprintf, 627 .se_width_idx = HISTC_CGROUP_ID, 628 }; 629 630 /* --sort socket */ 631 632 static int64_t 633 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 634 { 635 return right->socket - left->socket; 636 } 637 638 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 639 size_t size, unsigned int width) 640 { 641 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 642 } 643 644 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 645 { 646 int sk = *(const int *)arg; 647 648 if (type != HIST_FILTER__SOCKET) 649 return -1; 650 651 return sk >= 0 && he->socket != sk; 652 } 653 654 struct sort_entry sort_socket = { 655 .se_header = "Socket", 656 .se_cmp = sort__socket_cmp, 657 .se_snprintf = hist_entry__socket_snprintf, 658 .se_filter = hist_entry__socket_filter, 659 .se_width_idx = HISTC_SOCKET, 660 }; 661 662 /* --sort time */ 663 664 static int64_t 665 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 666 { 667 return right->time - left->time; 668 } 669 670 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf, 671 size_t size, unsigned int width) 672 { 673 char he_time[32]; 674 675 if (symbol_conf.nanosecs) 676 timestamp__scnprintf_nsec(he->time, he_time, 677 sizeof(he_time)); 678 else 679 timestamp__scnprintf_usec(he->time, he_time, 680 sizeof(he_time)); 681 682 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 683 } 684 685 struct sort_entry sort_time = { 686 .se_header = "Time", 687 .se_cmp = sort__time_cmp, 688 .se_snprintf = hist_entry__time_snprintf, 689 .se_width_idx = HISTC_TIME, 690 }; 691 692 /* --sort trace */ 693 694 static char 
*get_trace_output(struct hist_entry *he) 695 { 696 struct trace_seq seq; 697 struct evsel *evsel; 698 struct tep_record rec = { 699 .data = he->raw_data, 700 .size = he->raw_size, 701 }; 702 703 evsel = hists_to_evsel(he->hists); 704 705 trace_seq_init(&seq); 706 if (symbol_conf.raw_trace) { 707 tep_print_fields(&seq, he->raw_data, he->raw_size, 708 evsel->tp_format); 709 } else { 710 tep_event_info(&seq, evsel->tp_format, &rec); 711 } 712 /* 713 * Trim the buffer, it starts at 4KB and we're not going to 714 * add anything more to this buffer. 715 */ 716 return realloc(seq.buffer, seq.len + 1); 717 } 718 719 static int64_t 720 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 721 { 722 struct evsel *evsel; 723 724 evsel = hists_to_evsel(left->hists); 725 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 726 return 0; 727 728 if (left->trace_output == NULL) 729 left->trace_output = get_trace_output(left); 730 if (right->trace_output == NULL) 731 right->trace_output = get_trace_output(right); 732 733 return strcmp(right->trace_output, left->trace_output); 734 } 735 736 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 737 size_t size, unsigned int width) 738 { 739 struct evsel *evsel; 740 741 evsel = hists_to_evsel(he->hists); 742 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 743 return scnprintf(bf, size, "%-.*s", width, "N/A"); 744 745 if (he->trace_output == NULL) 746 he->trace_output = get_trace_output(he); 747 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 748 } 749 750 struct sort_entry sort_trace = { 751 .se_header = "Trace output", 752 .se_cmp = sort__trace_cmp, 753 .se_snprintf = hist_entry__trace_snprintf, 754 .se_width_idx = HISTC_TRACE, 755 }; 756 757 /* sort keys for branch stacks */ 758 759 static int64_t 760 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 761 { 762 if (!left->branch_info || !right->branch_info) 763 return cmp_null(left->branch_info, 
right->branch_info); 764 765 return _sort__dso_cmp(left->branch_info->from.map, 766 right->branch_info->from.map); 767 } 768 769 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 770 size_t size, unsigned int width) 771 { 772 if (he->branch_info) 773 return _hist_entry__dso_snprintf(he->branch_info->from.map, 774 bf, size, width); 775 else 776 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 777 } 778 779 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 780 const void *arg) 781 { 782 const struct dso *dso = arg; 783 784 if (type != HIST_FILTER__DSO) 785 return -1; 786 787 return dso && (!he->branch_info || !he->branch_info->from.map || 788 he->branch_info->from.map->dso != dso); 789 } 790 791 static int64_t 792 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 793 { 794 if (!left->branch_info || !right->branch_info) 795 return cmp_null(left->branch_info, right->branch_info); 796 797 return _sort__dso_cmp(left->branch_info->to.map, 798 right->branch_info->to.map); 799 } 800 801 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 802 size_t size, unsigned int width) 803 { 804 if (he->branch_info) 805 return _hist_entry__dso_snprintf(he->branch_info->to.map, 806 bf, size, width); 807 else 808 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 809 } 810 811 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 812 const void *arg) 813 { 814 const struct dso *dso = arg; 815 816 if (type != HIST_FILTER__DSO) 817 return -1; 818 819 return dso && (!he->branch_info || !he->branch_info->to.map || 820 he->branch_info->to.map->dso != dso); 821 } 822 823 static int64_t 824 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 825 { 826 struct addr_map_symbol *from_l = &left->branch_info->from; 827 struct addr_map_symbol *from_r = &right->branch_info->from; 828 829 if (!left->branch_info || !right->branch_info) 830 return 
cmp_null(left->branch_info, right->branch_info); 831 832 from_l = &left->branch_info->from; 833 from_r = &right->branch_info->from; 834 835 if (!from_l->sym && !from_r->sym) 836 return _sort__addr_cmp(from_l->addr, from_r->addr); 837 838 return _sort__sym_cmp(from_l->sym, from_r->sym); 839 } 840 841 static int64_t 842 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 843 { 844 struct addr_map_symbol *to_l, *to_r; 845 846 if (!left->branch_info || !right->branch_info) 847 return cmp_null(left->branch_info, right->branch_info); 848 849 to_l = &left->branch_info->to; 850 to_r = &right->branch_info->to; 851 852 if (!to_l->sym && !to_r->sym) 853 return _sort__addr_cmp(to_l->addr, to_r->addr); 854 855 return _sort__sym_cmp(to_l->sym, to_r->sym); 856 } 857 858 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 859 size_t size, unsigned int width) 860 { 861 if (he->branch_info) { 862 struct addr_map_symbol *from = &he->branch_info->from; 863 864 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 865 he->level, bf, size, width); 866 } 867 868 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 869 } 870 871 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 872 size_t size, unsigned int width) 873 { 874 if (he->branch_info) { 875 struct addr_map_symbol *to = &he->branch_info->to; 876 877 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 878 he->level, bf, size, width); 879 } 880 881 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 882 } 883 884 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 885 const void *arg) 886 { 887 const char *sym = arg; 888 889 if (type != HIST_FILTER__SYMBOL) 890 return -1; 891 892 return sym && !(he->branch_info && he->branch_info->from.sym && 893 strstr(he->branch_info->from.sym->name, sym)); 894 } 895 896 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 897 const void *arg) 898 { 899 
const char *sym = arg; 900 901 if (type != HIST_FILTER__SYMBOL) 902 return -1; 903 904 return sym && !(he->branch_info && he->branch_info->to.sym && 905 strstr(he->branch_info->to.sym->name, sym)); 906 } 907 908 struct sort_entry sort_dso_from = { 909 .se_header = "Source Shared Object", 910 .se_cmp = sort__dso_from_cmp, 911 .se_snprintf = hist_entry__dso_from_snprintf, 912 .se_filter = hist_entry__dso_from_filter, 913 .se_width_idx = HISTC_DSO_FROM, 914 }; 915 916 struct sort_entry sort_dso_to = { 917 .se_header = "Target Shared Object", 918 .se_cmp = sort__dso_to_cmp, 919 .se_snprintf = hist_entry__dso_to_snprintf, 920 .se_filter = hist_entry__dso_to_filter, 921 .se_width_idx = HISTC_DSO_TO, 922 }; 923 924 struct sort_entry sort_sym_from = { 925 .se_header = "Source Symbol", 926 .se_cmp = sort__sym_from_cmp, 927 .se_snprintf = hist_entry__sym_from_snprintf, 928 .se_filter = hist_entry__sym_from_filter, 929 .se_width_idx = HISTC_SYMBOL_FROM, 930 }; 931 932 struct sort_entry sort_sym_to = { 933 .se_header = "Target Symbol", 934 .se_cmp = sort__sym_to_cmp, 935 .se_snprintf = hist_entry__sym_to_snprintf, 936 .se_filter = hist_entry__sym_to_filter, 937 .se_width_idx = HISTC_SYMBOL_TO, 938 }; 939 940 static int64_t 941 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 942 { 943 unsigned char mp, p; 944 945 if (!left->branch_info || !right->branch_info) 946 return cmp_null(left->branch_info, right->branch_info); 947 948 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 949 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 950 return mp || p; 951 } 952 953 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 954 size_t size, unsigned int width){ 955 static const char *out = "N/A"; 956 957 if (he->branch_info) { 958 if (he->branch_info->flags.predicted) 959 out = "N"; 960 else if (he->branch_info->flags.mispred) 961 out = "Y"; 962 } 963 964 return repsep_snprintf(bf, size, 
"%-*.*s", width, width, out); 965 } 966 967 static int64_t 968 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 969 { 970 if (!left->branch_info || !right->branch_info) 971 return cmp_null(left->branch_info, right->branch_info); 972 973 return left->branch_info->flags.cycles - 974 right->branch_info->flags.cycles; 975 } 976 977 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 978 size_t size, unsigned int width) 979 { 980 if (!he->branch_info) 981 return scnprintf(bf, size, "%-.*s", width, "N/A"); 982 if (he->branch_info->flags.cycles == 0) 983 return repsep_snprintf(bf, size, "%-*s", width, "-"); 984 return repsep_snprintf(bf, size, "%-*hd", width, 985 he->branch_info->flags.cycles); 986 } 987 988 struct sort_entry sort_cycles = { 989 .se_header = "Basic Block Cycles", 990 .se_cmp = sort__cycles_cmp, 991 .se_snprintf = hist_entry__cycles_snprintf, 992 .se_width_idx = HISTC_CYCLES, 993 }; 994 995 /* --sort daddr_sym */ 996 int64_t 997 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 998 { 999 uint64_t l = 0, r = 0; 1000 1001 if (left->mem_info) 1002 l = left->mem_info->daddr.addr; 1003 if (right->mem_info) 1004 r = right->mem_info->daddr.addr; 1005 1006 return (int64_t)(r - l); 1007 } 1008 1009 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1010 size_t size, unsigned int width) 1011 { 1012 uint64_t addr = 0; 1013 struct map *map = NULL; 1014 struct symbol *sym = NULL; 1015 1016 if (he->mem_info) { 1017 addr = he->mem_info->daddr.addr; 1018 map = he->mem_info->daddr.map; 1019 sym = he->mem_info->daddr.sym; 1020 } 1021 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 1022 width); 1023 } 1024 1025 int64_t 1026 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1027 { 1028 uint64_t l = 0, r = 0; 1029 1030 if (left->mem_info) 1031 l = left->mem_info->iaddr.addr; 1032 if (right->mem_info) 1033 r = right->mem_info->iaddr.addr; 1034 1035 return 
(int64_t)(r - l); 1036 } 1037 1038 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1039 size_t size, unsigned int width) 1040 { 1041 uint64_t addr = 0; 1042 struct map *map = NULL; 1043 struct symbol *sym = NULL; 1044 1045 if (he->mem_info) { 1046 addr = he->mem_info->iaddr.addr; 1047 map = he->mem_info->iaddr.map; 1048 sym = he->mem_info->iaddr.sym; 1049 } 1050 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 1051 width); 1052 } 1053 1054 static int64_t 1055 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1056 { 1057 struct map *map_l = NULL; 1058 struct map *map_r = NULL; 1059 1060 if (left->mem_info) 1061 map_l = left->mem_info->daddr.map; 1062 if (right->mem_info) 1063 map_r = right->mem_info->daddr.map; 1064 1065 return _sort__dso_cmp(map_l, map_r); 1066 } 1067 1068 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1069 size_t size, unsigned int width) 1070 { 1071 struct map *map = NULL; 1072 1073 if (he->mem_info) 1074 map = he->mem_info->daddr.map; 1075 1076 return _hist_entry__dso_snprintf(map, bf, size, width); 1077 } 1078 1079 static int64_t 1080 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1081 { 1082 union perf_mem_data_src data_src_l; 1083 union perf_mem_data_src data_src_r; 1084 1085 if (left->mem_info) 1086 data_src_l = left->mem_info->data_src; 1087 else 1088 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1089 1090 if (right->mem_info) 1091 data_src_r = right->mem_info->data_src; 1092 else 1093 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1094 1095 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1096 } 1097 1098 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1099 size_t size, unsigned int width) 1100 { 1101 char out[10]; 1102 1103 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1104 return repsep_snprintf(bf, size, "%.*s", width, out); 1105 } 1106 1107 static int64_t 1108 sort__tlb_cmp(struct 
hist_entry *left, struct hist_entry *right) 1109 { 1110 union perf_mem_data_src data_src_l; 1111 union perf_mem_data_src data_src_r; 1112 1113 if (left->mem_info) 1114 data_src_l = left->mem_info->data_src; 1115 else 1116 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1117 1118 if (right->mem_info) 1119 data_src_r = right->mem_info->data_src; 1120 else 1121 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1122 1123 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1124 } 1125 1126 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1127 size_t size, unsigned int width) 1128 { 1129 char out[64]; 1130 1131 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1132 return repsep_snprintf(bf, size, "%-*s", width, out); 1133 } 1134 1135 static int64_t 1136 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1137 { 1138 union perf_mem_data_src data_src_l; 1139 union perf_mem_data_src data_src_r; 1140 1141 if (left->mem_info) 1142 data_src_l = left->mem_info->data_src; 1143 else 1144 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1145 1146 if (right->mem_info) 1147 data_src_r = right->mem_info->data_src; 1148 else 1149 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1150 1151 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1152 } 1153 1154 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1155 size_t size, unsigned int width) 1156 { 1157 char out[64]; 1158 1159 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1160 return repsep_snprintf(bf, size, "%-*s", width, out); 1161 } 1162 1163 static int64_t 1164 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1165 { 1166 union perf_mem_data_src data_src_l; 1167 union perf_mem_data_src data_src_r; 1168 1169 if (left->mem_info) 1170 data_src_l = left->mem_info->data_src; 1171 else 1172 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1173 1174 if (right->mem_info) 1175 data_src_r = right->mem_info->data_src; 1176 else 1177 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1178 1179 
return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1180 } 1181 1182 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1183 size_t size, unsigned int width) 1184 { 1185 char out[64]; 1186 1187 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1188 return repsep_snprintf(bf, size, "%-*s", width, out); 1189 } 1190 1191 int64_t 1192 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1193 { 1194 u64 l, r; 1195 struct map *l_map, *r_map; 1196 1197 if (!left->mem_info) return -1; 1198 if (!right->mem_info) return 1; 1199 1200 /* group event types together */ 1201 if (left->cpumode > right->cpumode) return -1; 1202 if (left->cpumode < right->cpumode) return 1; 1203 1204 l_map = left->mem_info->daddr.map; 1205 r_map = right->mem_info->daddr.map; 1206 1207 /* if both are NULL, jump to sort on al_addr instead */ 1208 if (!l_map && !r_map) 1209 goto addr; 1210 1211 if (!l_map) return -1; 1212 if (!r_map) return 1; 1213 1214 if (l_map->maj > r_map->maj) return -1; 1215 if (l_map->maj < r_map->maj) return 1; 1216 1217 if (l_map->min > r_map->min) return -1; 1218 if (l_map->min < r_map->min) return 1; 1219 1220 if (l_map->ino > r_map->ino) return -1; 1221 if (l_map->ino < r_map->ino) return 1; 1222 1223 if (l_map->ino_generation > r_map->ino_generation) return -1; 1224 if (l_map->ino_generation < r_map->ino_generation) return 1; 1225 1226 /* 1227 * Addresses with no major/minor numbers are assumed to be 1228 * anonymous in userspace. Sort those on pid then address. 1229 * 1230 * The kernel and non-zero major/minor mapped areas are 1231 * assumed to be unity mapped. Sort those on address. 
1232 */ 1233 1234 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1235 (!(l_map->flags & MAP_SHARED)) && 1236 !l_map->maj && !l_map->min && !l_map->ino && 1237 !l_map->ino_generation) { 1238 /* userspace anonymous */ 1239 1240 if (left->thread->pid_ > right->thread->pid_) return -1; 1241 if (left->thread->pid_ < right->thread->pid_) return 1; 1242 } 1243 1244 addr: 1245 /* al_addr does all the right addr - start + offset calculations */ 1246 l = cl_address(left->mem_info->daddr.al_addr); 1247 r = cl_address(right->mem_info->daddr.al_addr); 1248 1249 if (l > r) return -1; 1250 if (l < r) return 1; 1251 1252 return 0; 1253 } 1254 1255 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1256 size_t size, unsigned int width) 1257 { 1258 1259 uint64_t addr = 0; 1260 struct map *map = NULL; 1261 struct symbol *sym = NULL; 1262 char level = he->level; 1263 1264 if (he->mem_info) { 1265 addr = cl_address(he->mem_info->daddr.al_addr); 1266 map = he->mem_info->daddr.map; 1267 sym = he->mem_info->daddr.sym; 1268 1269 /* print [s] for shared data mmaps */ 1270 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1271 map && !(map->prot & PROT_EXEC) && 1272 (map->flags & MAP_SHARED) && 1273 (map->maj || map->min || map->ino || 1274 map->ino_generation)) 1275 level = 's'; 1276 else if (!map) 1277 level = 'X'; 1278 } 1279 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, 1280 width); 1281 } 1282 1283 struct sort_entry sort_mispredict = { 1284 .se_header = "Branch Mispredicted", 1285 .se_cmp = sort__mispredict_cmp, 1286 .se_snprintf = hist_entry__mispredict_snprintf, 1287 .se_width_idx = HISTC_MISPREDICT, 1288 }; 1289 1290 static u64 he_weight(struct hist_entry *he) 1291 { 1292 return he->stat.nr_events ? 
he->stat.weight / he->stat.nr_events : 0; 1293 } 1294 1295 static int64_t 1296 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1297 { 1298 return he_weight(left) - he_weight(right); 1299 } 1300 1301 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1302 size_t size, unsigned int width) 1303 { 1304 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1305 } 1306 1307 struct sort_entry sort_local_weight = { 1308 .se_header = "Local Weight", 1309 .se_cmp = sort__local_weight_cmp, 1310 .se_snprintf = hist_entry__local_weight_snprintf, 1311 .se_width_idx = HISTC_LOCAL_WEIGHT, 1312 }; 1313 1314 static int64_t 1315 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1316 { 1317 return left->stat.weight - right->stat.weight; 1318 } 1319 1320 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1321 size_t size, unsigned int width) 1322 { 1323 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1324 } 1325 1326 struct sort_entry sort_global_weight = { 1327 .se_header = "Weight", 1328 .se_cmp = sort__global_weight_cmp, 1329 .se_snprintf = hist_entry__global_weight_snprintf, 1330 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1331 }; 1332 1333 struct sort_entry sort_mem_daddr_sym = { 1334 .se_header = "Data Symbol", 1335 .se_cmp = sort__daddr_cmp, 1336 .se_snprintf = hist_entry__daddr_snprintf, 1337 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1338 }; 1339 1340 struct sort_entry sort_mem_iaddr_sym = { 1341 .se_header = "Code Symbol", 1342 .se_cmp = sort__iaddr_cmp, 1343 .se_snprintf = hist_entry__iaddr_snprintf, 1344 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1345 }; 1346 1347 struct sort_entry sort_mem_daddr_dso = { 1348 .se_header = "Data Object", 1349 .se_cmp = sort__dso_daddr_cmp, 1350 .se_snprintf = hist_entry__dso_daddr_snprintf, 1351 .se_width_idx = HISTC_MEM_DADDR_DSO, 1352 }; 1353 1354 struct sort_entry sort_mem_locked = { 1355 .se_header = "Locked", 1356 
.se_cmp = sort__locked_cmp, 1357 .se_snprintf = hist_entry__locked_snprintf, 1358 .se_width_idx = HISTC_MEM_LOCKED, 1359 }; 1360 1361 struct sort_entry sort_mem_tlb = { 1362 .se_header = "TLB access", 1363 .se_cmp = sort__tlb_cmp, 1364 .se_snprintf = hist_entry__tlb_snprintf, 1365 .se_width_idx = HISTC_MEM_TLB, 1366 }; 1367 1368 struct sort_entry sort_mem_lvl = { 1369 .se_header = "Memory access", 1370 .se_cmp = sort__lvl_cmp, 1371 .se_snprintf = hist_entry__lvl_snprintf, 1372 .se_width_idx = HISTC_MEM_LVL, 1373 }; 1374 1375 struct sort_entry sort_mem_snoop = { 1376 .se_header = "Snoop", 1377 .se_cmp = sort__snoop_cmp, 1378 .se_snprintf = hist_entry__snoop_snprintf, 1379 .se_width_idx = HISTC_MEM_SNOOP, 1380 }; 1381 1382 struct sort_entry sort_mem_dcacheline = { 1383 .se_header = "Data Cacheline", 1384 .se_cmp = sort__dcacheline_cmp, 1385 .se_snprintf = hist_entry__dcacheline_snprintf, 1386 .se_width_idx = HISTC_MEM_DCACHELINE, 1387 }; 1388 1389 static int64_t 1390 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1391 { 1392 uint64_t l = 0, r = 0; 1393 1394 if (left->mem_info) 1395 l = left->mem_info->daddr.phys_addr; 1396 if (right->mem_info) 1397 r = right->mem_info->daddr.phys_addr; 1398 1399 return (int64_t)(r - l); 1400 } 1401 1402 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1403 size_t size, unsigned int width) 1404 { 1405 uint64_t addr = 0; 1406 size_t ret = 0; 1407 size_t len = BITS_PER_LONG / 4; 1408 1409 addr = he->mem_info->daddr.phys_addr; 1410 1411 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1412 1413 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1414 1415 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1416 1417 if (ret > width) 1418 bf[width] = '\0'; 1419 1420 return width; 1421 } 1422 1423 struct sort_entry sort_mem_phys_daddr = { 1424 .se_header = "Data Physical Address", 1425 .se_cmp = sort__phys_daddr_cmp, 1426 .se_snprintf = 
hist_entry__phys_daddr_snprintf, 1427 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1428 }; 1429 1430 static int64_t 1431 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1432 { 1433 if (!left->branch_info || !right->branch_info) 1434 return cmp_null(left->branch_info, right->branch_info); 1435 1436 return left->branch_info->flags.abort != 1437 right->branch_info->flags.abort; 1438 } 1439 1440 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1441 size_t size, unsigned int width) 1442 { 1443 static const char *out = "N/A"; 1444 1445 if (he->branch_info) { 1446 if (he->branch_info->flags.abort) 1447 out = "A"; 1448 else 1449 out = "."; 1450 } 1451 1452 return repsep_snprintf(bf, size, "%-*s", width, out); 1453 } 1454 1455 struct sort_entry sort_abort = { 1456 .se_header = "Transaction abort", 1457 .se_cmp = sort__abort_cmp, 1458 .se_snprintf = hist_entry__abort_snprintf, 1459 .se_width_idx = HISTC_ABORT, 1460 }; 1461 1462 static int64_t 1463 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1464 { 1465 if (!left->branch_info || !right->branch_info) 1466 return cmp_null(left->branch_info, right->branch_info); 1467 1468 return left->branch_info->flags.in_tx != 1469 right->branch_info->flags.in_tx; 1470 } 1471 1472 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1473 size_t size, unsigned int width) 1474 { 1475 static const char *out = "N/A"; 1476 1477 if (he->branch_info) { 1478 if (he->branch_info->flags.in_tx) 1479 out = "T"; 1480 else 1481 out = "."; 1482 } 1483 1484 return repsep_snprintf(bf, size, "%-*s", width, out); 1485 } 1486 1487 struct sort_entry sort_in_tx = { 1488 .se_header = "Branch in transaction", 1489 .se_cmp = sort__in_tx_cmp, 1490 .se_snprintf = hist_entry__in_tx_snprintf, 1491 .se_width_idx = HISTC_IN_TX, 1492 }; 1493 1494 static int64_t 1495 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1496 { 1497 return left->transaction - right->transaction; 1498 
} 1499 1500 static inline char *add_str(char *p, const char *str) 1501 { 1502 strcpy(p, str); 1503 return p + strlen(str); 1504 } 1505 1506 static struct txbit { 1507 unsigned flag; 1508 const char *name; 1509 int skip_for_len; 1510 } txbits[] = { 1511 { PERF_TXN_ELISION, "EL ", 0 }, 1512 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1513 { PERF_TXN_SYNC, "SYNC ", 1 }, 1514 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1515 { PERF_TXN_RETRY, "RETRY ", 0 }, 1516 { PERF_TXN_CONFLICT, "CON ", 0 }, 1517 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1518 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1519 { 0, NULL, 0 } 1520 }; 1521 1522 int hist_entry__transaction_len(void) 1523 { 1524 int i; 1525 int len = 0; 1526 1527 for (i = 0; txbits[i].name; i++) { 1528 if (!txbits[i].skip_for_len) 1529 len += strlen(txbits[i].name); 1530 } 1531 len += 4; /* :XX<space> */ 1532 return len; 1533 } 1534 1535 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1536 size_t size, unsigned int width) 1537 { 1538 u64 t = he->transaction; 1539 char buf[128]; 1540 char *p = buf; 1541 int i; 1542 1543 buf[0] = 0; 1544 for (i = 0; txbits[i].name; i++) 1545 if (txbits[i].flag & t) 1546 p = add_str(p, txbits[i].name); 1547 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1548 p = add_str(p, "NEITHER "); 1549 if (t & PERF_TXN_ABORT_MASK) { 1550 sprintf(p, ":%" PRIx64, 1551 (t & PERF_TXN_ABORT_MASK) >> 1552 PERF_TXN_ABORT_SHIFT); 1553 p += strlen(p); 1554 } 1555 1556 return repsep_snprintf(bf, size, "%-*s", width, buf); 1557 } 1558 1559 struct sort_entry sort_transaction = { 1560 .se_header = "Transaction ", 1561 .se_cmp = sort__transaction_cmp, 1562 .se_snprintf = hist_entry__transaction_snprintf, 1563 .se_width_idx = HISTC_TRANSACTION, 1564 }; 1565 1566 /* --sort symbol_size */ 1567 1568 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1569 { 1570 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1571 int64_t size_r = sym_r != NULL ? 
symbol__size(sym_r) : 0; 1572 1573 return size_l < size_r ? -1 : 1574 size_l == size_r ? 0 : 1; 1575 } 1576 1577 static int64_t 1578 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1579 { 1580 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1581 } 1582 1583 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1584 size_t bf_size, unsigned int width) 1585 { 1586 if (sym) 1587 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1588 1589 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1590 } 1591 1592 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1593 size_t size, unsigned int width) 1594 { 1595 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1596 } 1597 1598 struct sort_entry sort_sym_size = { 1599 .se_header = "Symbol size", 1600 .se_cmp = sort__sym_size_cmp, 1601 .se_snprintf = hist_entry__sym_size_snprintf, 1602 .se_width_idx = HISTC_SYM_SIZE, 1603 }; 1604 1605 /* --sort dso_size */ 1606 1607 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 1608 { 1609 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 1610 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 1611 1612 return size_l < size_r ? -1 : 1613 size_l == size_r ? 
0 : 1; 1614 } 1615 1616 static int64_t 1617 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 1618 { 1619 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 1620 } 1621 1622 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 1623 size_t bf_size, unsigned int width) 1624 { 1625 if (map && map->dso) 1626 return repsep_snprintf(bf, bf_size, "%*d", width, 1627 map__size(map)); 1628 1629 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1630 } 1631 1632 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 1633 size_t size, unsigned int width) 1634 { 1635 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 1636 } 1637 1638 struct sort_entry sort_dso_size = { 1639 .se_header = "DSO size", 1640 .se_cmp = sort__dso_size_cmp, 1641 .se_snprintf = hist_entry__dso_size_snprintf, 1642 .se_width_idx = HISTC_DSO_SIZE, 1643 }; 1644 1645 1646 struct sort_dimension { 1647 const char *name; 1648 struct sort_entry *entry; 1649 int taken; 1650 }; 1651 1652 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1653 1654 static struct sort_dimension common_sort_dimensions[] = { 1655 DIM(SORT_PID, "pid", sort_thread), 1656 DIM(SORT_COMM, "comm", sort_comm), 1657 DIM(SORT_DSO, "dso", sort_dso), 1658 DIM(SORT_SYM, "symbol", sort_sym), 1659 DIM(SORT_PARENT, "parent", sort_parent), 1660 DIM(SORT_CPU, "cpu", sort_cpu), 1661 DIM(SORT_SOCKET, "socket", sort_socket), 1662 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1663 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1664 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1665 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1666 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1667 DIM(SORT_TRACE, "trace", sort_trace), 1668 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1669 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1670 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1671 DIM(SORT_SYM_IPC_NULL, "ipc_null", 
sort_sym_ipc_null), 1672 DIM(SORT_TIME, "time", sort_time), 1673 }; 1674 1675 #undef DIM 1676 1677 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1678 1679 static struct sort_dimension bstack_sort_dimensions[] = { 1680 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1681 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1682 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1683 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1684 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1685 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1686 DIM(SORT_ABORT, "abort", sort_abort), 1687 DIM(SORT_CYCLES, "cycles", sort_cycles), 1688 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1689 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1690 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 1691 }; 1692 1693 #undef DIM 1694 1695 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1696 1697 static struct sort_dimension memory_sort_dimensions[] = { 1698 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1699 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1700 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1701 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1702 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1703 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1704 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1705 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1706 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 1707 }; 1708 1709 #undef DIM 1710 1711 struct hpp_dimension { 1712 const char *name; 1713 struct perf_hpp_fmt *fmt; 1714 int taken; 1715 }; 1716 1717 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1718 1719 static struct hpp_dimension hpp_sort_dimensions[] = { 1720 DIM(PERF_HPP__OVERHEAD, "overhead"), 1721 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1722 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1723 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, 
"overhead_guest_sys"), 1724 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1725 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1726 DIM(PERF_HPP__SAMPLES, "sample"), 1727 DIM(PERF_HPP__PERIOD, "period"), 1728 }; 1729 1730 #undef DIM 1731 1732 struct hpp_sort_entry { 1733 struct perf_hpp_fmt hpp; 1734 struct sort_entry *se; 1735 }; 1736 1737 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1738 { 1739 struct hpp_sort_entry *hse; 1740 1741 if (!perf_hpp__is_sort_entry(fmt)) 1742 return; 1743 1744 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1745 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1746 } 1747 1748 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1749 struct hists *hists, int line __maybe_unused, 1750 int *span __maybe_unused) 1751 { 1752 struct hpp_sort_entry *hse; 1753 size_t len = fmt->user_len; 1754 1755 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1756 1757 if (!len) 1758 len = hists__col_len(hists, hse->se->se_width_idx); 1759 1760 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1761 } 1762 1763 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1764 struct perf_hpp *hpp __maybe_unused, 1765 struct hists *hists) 1766 { 1767 struct hpp_sort_entry *hse; 1768 size_t len = fmt->user_len; 1769 1770 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1771 1772 if (!len) 1773 len = hists__col_len(hists, hse->se->se_width_idx); 1774 1775 return len; 1776 } 1777 1778 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1779 struct hist_entry *he) 1780 { 1781 struct hpp_sort_entry *hse; 1782 size_t len = fmt->user_len; 1783 1784 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1785 1786 if (!len) 1787 len = hists__col_len(he->hists, hse->se->se_width_idx); 1788 1789 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1790 } 1791 1792 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1793 
struct hist_entry *a, struct hist_entry *b) 1794 { 1795 struct hpp_sort_entry *hse; 1796 1797 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1798 return hse->se->se_cmp(a, b); 1799 } 1800 1801 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1802 struct hist_entry *a, struct hist_entry *b) 1803 { 1804 struct hpp_sort_entry *hse; 1805 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1806 1807 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1808 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1809 return collapse_fn(a, b); 1810 } 1811 1812 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1813 struct hist_entry *a, struct hist_entry *b) 1814 { 1815 struct hpp_sort_entry *hse; 1816 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1817 1818 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1819 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1820 return sort_fn(a, b); 1821 } 1822 1823 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1824 { 1825 return format->header == __sort__hpp_header; 1826 } 1827 1828 #define MK_SORT_ENTRY_CHK(key) \ 1829 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1830 { \ 1831 struct hpp_sort_entry *hse; \ 1832 \ 1833 if (!perf_hpp__is_sort_entry(fmt)) \ 1834 return false; \ 1835 \ 1836 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1837 return hse->se == &sort_ ## key ; \ 1838 } 1839 1840 MK_SORT_ENTRY_CHK(trace) 1841 MK_SORT_ENTRY_CHK(srcline) 1842 MK_SORT_ENTRY_CHK(srcfile) 1843 MK_SORT_ENTRY_CHK(thread) 1844 MK_SORT_ENTRY_CHK(comm) 1845 MK_SORT_ENTRY_CHK(dso) 1846 MK_SORT_ENTRY_CHK(sym) 1847 1848 1849 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1850 { 1851 struct hpp_sort_entry *hse_a; 1852 struct hpp_sort_entry *hse_b; 1853 1854 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1855 return false; 1856 1857 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1858 hse_b = container_of(b, 
struct hpp_sort_entry, hpp); 1859 1860 return hse_a->se == hse_b->se; 1861 } 1862 1863 static void hse_free(struct perf_hpp_fmt *fmt) 1864 { 1865 struct hpp_sort_entry *hse; 1866 1867 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1868 free(hse); 1869 } 1870 1871 static struct hpp_sort_entry * 1872 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1873 { 1874 struct hpp_sort_entry *hse; 1875 1876 hse = malloc(sizeof(*hse)); 1877 if (hse == NULL) { 1878 pr_err("Memory allocation failed\n"); 1879 return NULL; 1880 } 1881 1882 hse->se = sd->entry; 1883 hse->hpp.name = sd->entry->se_header; 1884 hse->hpp.header = __sort__hpp_header; 1885 hse->hpp.width = __sort__hpp_width; 1886 hse->hpp.entry = __sort__hpp_entry; 1887 hse->hpp.color = NULL; 1888 1889 hse->hpp.cmp = __sort__hpp_cmp; 1890 hse->hpp.collapse = __sort__hpp_collapse; 1891 hse->hpp.sort = __sort__hpp_sort; 1892 hse->hpp.equal = __sort__hpp_equal; 1893 hse->hpp.free = hse_free; 1894 1895 INIT_LIST_HEAD(&hse->hpp.list); 1896 INIT_LIST_HEAD(&hse->hpp.sort_list); 1897 hse->hpp.elide = false; 1898 hse->hpp.len = 0; 1899 hse->hpp.user_len = 0; 1900 hse->hpp.level = level; 1901 1902 return hse; 1903 } 1904 1905 static void hpp_free(struct perf_hpp_fmt *fmt) 1906 { 1907 free(fmt); 1908 } 1909 1910 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1911 int level) 1912 { 1913 struct perf_hpp_fmt *fmt; 1914 1915 fmt = memdup(hd->fmt, sizeof(*fmt)); 1916 if (fmt) { 1917 INIT_LIST_HEAD(&fmt->list); 1918 INIT_LIST_HEAD(&fmt->sort_list); 1919 fmt->free = hpp_free; 1920 fmt->level = level; 1921 } 1922 1923 return fmt; 1924 } 1925 1926 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1927 { 1928 struct perf_hpp_fmt *fmt; 1929 struct hpp_sort_entry *hse; 1930 int ret = -1; 1931 int r; 1932 1933 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1934 if (!perf_hpp__is_sort_entry(fmt)) 1935 continue; 1936 1937 hse = container_of(fmt, struct 
hpp_sort_entry, hpp); 1938 if (hse->se->se_filter == NULL) 1939 continue; 1940 1941 /* 1942 * hist entry is filtered if any of sort key in the hpp list 1943 * is applied. But it should skip non-matched filter types. 1944 */ 1945 r = hse->se->se_filter(he, type, arg); 1946 if (r >= 0) { 1947 if (ret < 0) 1948 ret = 0; 1949 ret |= r; 1950 } 1951 } 1952 1953 return ret; 1954 } 1955 1956 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1957 struct perf_hpp_list *list, 1958 int level) 1959 { 1960 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1961 1962 if (hse == NULL) 1963 return -1; 1964 1965 perf_hpp_list__register_sort_field(list, &hse->hpp); 1966 return 0; 1967 } 1968 1969 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1970 struct perf_hpp_list *list) 1971 { 1972 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1973 1974 if (hse == NULL) 1975 return -1; 1976 1977 perf_hpp_list__column_register(list, &hse->hpp); 1978 return 0; 1979 } 1980 1981 struct hpp_dynamic_entry { 1982 struct perf_hpp_fmt hpp; 1983 struct evsel *evsel; 1984 struct tep_format_field *field; 1985 unsigned dynamic_len; 1986 bool raw_trace; 1987 }; 1988 1989 static int hde_width(struct hpp_dynamic_entry *hde) 1990 { 1991 if (!hde->hpp.len) { 1992 int len = hde->dynamic_len; 1993 int namelen = strlen(hde->field->name); 1994 int fieldlen = hde->field->size; 1995 1996 if (namelen > len) 1997 len = namelen; 1998 1999 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2000 /* length for print hex numbers */ 2001 fieldlen = hde->field->size * 2 + 2; 2002 } 2003 if (fieldlen > len) 2004 len = fieldlen; 2005 2006 hde->hpp.len = len; 2007 } 2008 return hde->hpp.len; 2009 } 2010 2011 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2012 struct hist_entry *he) 2013 { 2014 char *str, *pos; 2015 struct tep_format_field *field = hde->field; 2016 size_t namelen; 2017 bool last = false; 2018 2019 if (hde->raw_trace) 2020 
return; 2021 2022 /* parse pretty print result and update max length */ 2023 if (!he->trace_output) 2024 he->trace_output = get_trace_output(he); 2025 2026 namelen = strlen(field->name); 2027 str = he->trace_output; 2028 2029 while (str) { 2030 pos = strchr(str, ' '); 2031 if (pos == NULL) { 2032 last = true; 2033 pos = str + strlen(str); 2034 } 2035 2036 if (!strncmp(str, field->name, namelen)) { 2037 size_t len; 2038 2039 str += namelen + 1; 2040 len = pos - str; 2041 2042 if (len > hde->dynamic_len) 2043 hde->dynamic_len = len; 2044 break; 2045 } 2046 2047 if (last) 2048 str = NULL; 2049 else 2050 str = pos + 1; 2051 } 2052 } 2053 2054 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2055 struct hists *hists __maybe_unused, 2056 int line __maybe_unused, 2057 int *span __maybe_unused) 2058 { 2059 struct hpp_dynamic_entry *hde; 2060 size_t len = fmt->user_len; 2061 2062 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2063 2064 if (!len) 2065 len = hde_width(hde); 2066 2067 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2068 } 2069 2070 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2071 struct perf_hpp *hpp __maybe_unused, 2072 struct hists *hists __maybe_unused) 2073 { 2074 struct hpp_dynamic_entry *hde; 2075 size_t len = fmt->user_len; 2076 2077 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2078 2079 if (!len) 2080 len = hde_width(hde); 2081 2082 return len; 2083 } 2084 2085 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2086 { 2087 struct hpp_dynamic_entry *hde; 2088 2089 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2090 2091 return hists_to_evsel(hists) == hde->evsel; 2092 } 2093 2094 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2095 struct hist_entry *he) 2096 { 2097 struct hpp_dynamic_entry *hde; 2098 size_t len = fmt->user_len; 2099 char *str, *pos; 2100 struct tep_format_field *field; 2101 
size_t namelen; 2102 bool last = false; 2103 int ret; 2104 2105 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2106 2107 if (!len) 2108 len = hde_width(hde); 2109 2110 if (hde->raw_trace) 2111 goto raw_field; 2112 2113 if (!he->trace_output) 2114 he->trace_output = get_trace_output(he); 2115 2116 field = hde->field; 2117 namelen = strlen(field->name); 2118 str = he->trace_output; 2119 2120 while (str) { 2121 pos = strchr(str, ' '); 2122 if (pos == NULL) { 2123 last = true; 2124 pos = str + strlen(str); 2125 } 2126 2127 if (!strncmp(str, field->name, namelen)) { 2128 str += namelen + 1; 2129 str = strndup(str, pos - str); 2130 2131 if (str == NULL) 2132 return scnprintf(hpp->buf, hpp->size, 2133 "%*.*s", len, len, "ERROR"); 2134 break; 2135 } 2136 2137 if (last) 2138 str = NULL; 2139 else 2140 str = pos + 1; 2141 } 2142 2143 if (str == NULL) { 2144 struct trace_seq seq; 2145 raw_field: 2146 trace_seq_init(&seq); 2147 tep_print_field(&seq, he->raw_data, hde->field); 2148 str = seq.buffer; 2149 } 2150 2151 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2152 free(str); 2153 return ret; 2154 } 2155 2156 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2157 struct hist_entry *a, struct hist_entry *b) 2158 { 2159 struct hpp_dynamic_entry *hde; 2160 struct tep_format_field *field; 2161 unsigned offset, size; 2162 2163 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2164 2165 if (b == NULL) { 2166 update_dynamic_len(hde, a); 2167 return 0; 2168 } 2169 2170 field = hde->field; 2171 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2172 unsigned long long dyn; 2173 2174 tep_read_number_field(field, a->raw_data, &dyn); 2175 offset = dyn & 0xffff; 2176 size = (dyn >> 16) & 0xffff; 2177 2178 /* record max width for output */ 2179 if (size > hde->dynamic_len) 2180 hde->dynamic_len = size; 2181 } else { 2182 offset = field->offset; 2183 size = field->size; 2184 } 2185 2186 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2187 } 2188 
2189 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2190 { 2191 return fmt->cmp == __sort__hde_cmp; 2192 } 2193 2194 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2195 { 2196 struct hpp_dynamic_entry *hde_a; 2197 struct hpp_dynamic_entry *hde_b; 2198 2199 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2200 return false; 2201 2202 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2203 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2204 2205 return hde_a->field == hde_b->field; 2206 } 2207 2208 static void hde_free(struct perf_hpp_fmt *fmt) 2209 { 2210 struct hpp_dynamic_entry *hde; 2211 2212 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2213 free(hde); 2214 } 2215 2216 static struct hpp_dynamic_entry * 2217 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2218 int level) 2219 { 2220 struct hpp_dynamic_entry *hde; 2221 2222 hde = malloc(sizeof(*hde)); 2223 if (hde == NULL) { 2224 pr_debug("Memory allocation failed\n"); 2225 return NULL; 2226 } 2227 2228 hde->evsel = evsel; 2229 hde->field = field; 2230 hde->dynamic_len = 0; 2231 2232 hde->hpp.name = field->name; 2233 hde->hpp.header = __sort__hde_header; 2234 hde->hpp.width = __sort__hde_width; 2235 hde->hpp.entry = __sort__hde_entry; 2236 hde->hpp.color = NULL; 2237 2238 hde->hpp.cmp = __sort__hde_cmp; 2239 hde->hpp.collapse = __sort__hde_cmp; 2240 hde->hpp.sort = __sort__hde_cmp; 2241 hde->hpp.equal = __sort__hde_equal; 2242 hde->hpp.free = hde_free; 2243 2244 INIT_LIST_HEAD(&hde->hpp.list); 2245 INIT_LIST_HEAD(&hde->hpp.sort_list); 2246 hde->hpp.elide = false; 2247 hde->hpp.len = 0; 2248 hde->hpp.user_len = 0; 2249 hde->hpp.level = level; 2250 2251 return hde; 2252 } 2253 2254 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2255 { 2256 struct perf_hpp_fmt *new_fmt = NULL; 2257 2258 if (perf_hpp__is_sort_entry(fmt)) { 2259 struct hpp_sort_entry *hse, *new_hse; 2260 2261 hse = 
container_of(fmt, struct hpp_sort_entry, hpp); 2262 new_hse = memdup(hse, sizeof(*hse)); 2263 if (new_hse) 2264 new_fmt = &new_hse->hpp; 2265 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2266 struct hpp_dynamic_entry *hde, *new_hde; 2267 2268 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2269 new_hde = memdup(hde, sizeof(*hde)); 2270 if (new_hde) 2271 new_fmt = &new_hde->hpp; 2272 } else { 2273 new_fmt = memdup(fmt, sizeof(*fmt)); 2274 } 2275 2276 INIT_LIST_HEAD(&new_fmt->list); 2277 INIT_LIST_HEAD(&new_fmt->sort_list); 2278 2279 return new_fmt; 2280 } 2281 2282 static int parse_field_name(char *str, char **event, char **field, char **opt) 2283 { 2284 char *event_name, *field_name, *opt_name; 2285 2286 event_name = str; 2287 field_name = strchr(str, '.'); 2288 2289 if (field_name) { 2290 *field_name++ = '\0'; 2291 } else { 2292 event_name = NULL; 2293 field_name = str; 2294 } 2295 2296 opt_name = strchr(field_name, '/'); 2297 if (opt_name) 2298 *opt_name++ = '\0'; 2299 2300 *event = event_name; 2301 *field = field_name; 2302 *opt = opt_name; 2303 2304 return 0; 2305 } 2306 2307 /* find match evsel using a given event name. The event name can be: 2308 * 1. '%' + event index (e.g. '%1' for first event) 2309 * 2. full event name (e.g. sched:sched_switch) 2310 * 3. 
partial event name (should not contain ':') 2311 */ 2312 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 2313 { 2314 struct evsel *evsel = NULL; 2315 struct evsel *pos; 2316 bool full_name; 2317 2318 /* case 1 */ 2319 if (event_name[0] == '%') { 2320 int nr = strtol(event_name+1, NULL, 0); 2321 2322 if (nr > evlist->core.nr_entries) 2323 return NULL; 2324 2325 evsel = perf_evlist__first(evlist); 2326 while (--nr > 0) 2327 evsel = perf_evsel__next(evsel); 2328 2329 return evsel; 2330 } 2331 2332 full_name = !!strchr(event_name, ':'); 2333 evlist__for_each_entry(evlist, pos) { 2334 /* case 2 */ 2335 if (full_name && !strcmp(pos->name, event_name)) 2336 return pos; 2337 /* case 3 */ 2338 if (!full_name && strstr(pos->name, event_name)) { 2339 if (evsel) { 2340 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2341 event_name, evsel->name, pos->name); 2342 return NULL; 2343 } 2344 evsel = pos; 2345 } 2346 } 2347 2348 return evsel; 2349 } 2350 2351 static int __dynamic_dimension__add(struct evsel *evsel, 2352 struct tep_format_field *field, 2353 bool raw_trace, int level) 2354 { 2355 struct hpp_dynamic_entry *hde; 2356 2357 hde = __alloc_dynamic_entry(evsel, field, level); 2358 if (hde == NULL) 2359 return -ENOMEM; 2360 2361 hde->raw_trace = raw_trace; 2362 2363 perf_hpp__register_sort_field(&hde->hpp); 2364 return 0; 2365 } 2366 2367 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 2368 { 2369 int ret; 2370 struct tep_format_field *field; 2371 2372 field = evsel->tp_format->format.fields; 2373 while (field) { 2374 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2375 if (ret < 0) 2376 return ret; 2377 2378 field = field->next; 2379 } 2380 return 0; 2381 } 2382 2383 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 2384 int level) 2385 { 2386 int ret; 2387 struct evsel *evsel; 2388 2389 evlist__for_each_entry(evlist, evsel) { 2390 if (evsel->core.attr.type != 
PERF_TYPE_TRACEPOINT) 2391 continue; 2392 2393 ret = add_evsel_fields(evsel, raw_trace, level); 2394 if (ret < 0) 2395 return ret; 2396 } 2397 return 0; 2398 } 2399 2400 static int add_all_matching_fields(struct evlist *evlist, 2401 char *field_name, bool raw_trace, int level) 2402 { 2403 int ret = -ESRCH; 2404 struct evsel *evsel; 2405 struct tep_format_field *field; 2406 2407 evlist__for_each_entry(evlist, evsel) { 2408 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2409 continue; 2410 2411 field = tep_find_any_field(evsel->tp_format, field_name); 2412 if (field == NULL) 2413 continue; 2414 2415 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2416 if (ret < 0) 2417 break; 2418 } 2419 return ret; 2420 } 2421 2422 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 2423 int level) 2424 { 2425 char *str, *event_name, *field_name, *opt_name; 2426 struct evsel *evsel; 2427 struct tep_format_field *field; 2428 bool raw_trace = symbol_conf.raw_trace; 2429 int ret = 0; 2430 2431 if (evlist == NULL) 2432 return -ENOENT; 2433 2434 str = strdup(tok); 2435 if (str == NULL) 2436 return -ENOMEM; 2437 2438 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 2439 ret = -EINVAL; 2440 goto out; 2441 } 2442 2443 if (opt_name) { 2444 if (strcmp(opt_name, "raw")) { 2445 pr_debug("unsupported field option %s\n", opt_name); 2446 ret = -EINVAL; 2447 goto out; 2448 } 2449 raw_trace = true; 2450 } 2451 2452 if (!strcmp(field_name, "trace_fields")) { 2453 ret = add_all_dynamic_fields(evlist, raw_trace, level); 2454 goto out; 2455 } 2456 2457 if (event_name == NULL) { 2458 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 2459 goto out; 2460 } 2461 2462 evsel = find_evsel(evlist, event_name); 2463 if (evsel == NULL) { 2464 pr_debug("Cannot find event: %s\n", event_name); 2465 ret = -ENOENT; 2466 goto out; 2467 } 2468 2469 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2470 pr_debug("%s is not a tracepoint 
event\n", event_name); 2471 ret = -EINVAL; 2472 goto out; 2473 } 2474 2475 if (!strcmp(field_name, "*")) { 2476 ret = add_evsel_fields(evsel, raw_trace, level); 2477 } else { 2478 field = tep_find_any_field(evsel->tp_format, field_name); 2479 if (field == NULL) { 2480 pr_debug("Cannot find event field for %s.%s\n", 2481 event_name, field_name); 2482 return -ENOENT; 2483 } 2484 2485 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2486 } 2487 2488 out: 2489 free(str); 2490 return ret; 2491 } 2492 2493 static int __sort_dimension__add(struct sort_dimension *sd, 2494 struct perf_hpp_list *list, 2495 int level) 2496 { 2497 if (sd->taken) 2498 return 0; 2499 2500 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2501 return -1; 2502 2503 if (sd->entry->se_collapse) 2504 list->need_collapse = 1; 2505 2506 sd->taken = 1; 2507 2508 return 0; 2509 } 2510 2511 static int __hpp_dimension__add(struct hpp_dimension *hd, 2512 struct perf_hpp_list *list, 2513 int level) 2514 { 2515 struct perf_hpp_fmt *fmt; 2516 2517 if (hd->taken) 2518 return 0; 2519 2520 fmt = __hpp_dimension__alloc_hpp(hd, level); 2521 if (!fmt) 2522 return -1; 2523 2524 hd->taken = 1; 2525 perf_hpp_list__register_sort_field(list, fmt); 2526 return 0; 2527 } 2528 2529 static int __sort_dimension__add_output(struct perf_hpp_list *list, 2530 struct sort_dimension *sd) 2531 { 2532 if (sd->taken) 2533 return 0; 2534 2535 if (__sort_dimension__add_hpp_output(sd, list) < 0) 2536 return -1; 2537 2538 sd->taken = 1; 2539 return 0; 2540 } 2541 2542 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 2543 struct hpp_dimension *hd) 2544 { 2545 struct perf_hpp_fmt *fmt; 2546 2547 if (hd->taken) 2548 return 0; 2549 2550 fmt = __hpp_dimension__alloc_hpp(hd, 0); 2551 if (!fmt) 2552 return -1; 2553 2554 hd->taken = 1; 2555 perf_hpp_list__column_register(list, fmt); 2556 return 0; 2557 } 2558 2559 int hpp_dimension__add_output(unsigned col) 2560 { 2561 BUG_ON(col >= PERF_HPP__MAX_INDEX); 
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

/*
 * Resolve the sort key @tok against the common, hpp, branch-stack and
 * memory dimension tables (in that order) and add it to @list.
 * Unmatched tokens are tried as a dynamic tracepoint-field entry.
 *
 * Returns 0 on success, -EINVAL for a key invalid in the current
 * sort__mode, -ESRCH for an unknown key.
 */
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		/* prefix match: e.g. "com" matches "comm" */
		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			/* "parent" needs the caller-filter regex compiled */
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but the name of symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* branch-stack keys only make sense in branch mode */
		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		/* dcacheline grouping is meaningless without a cacheline size */
		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	/* last resort: treat the token as a tracepoint field */
	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

/*
 * Split the --sort string @str on "{}, " and add every token as a sort
 * key.  '{...}' groups keep their keys at the same hierarchy level;
 * otherwise each key descends one level.  @str is modified in place.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp =
strpbrk(str, "{}, ");
		if (tmp) {
			/* keys inside a {} group share the current level */
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

/*
 * Pick the default sort order string for the current sort__mode.
 * Side effect: when every event in @evlist is a tracepoint, sort__mode
 * is switched to SORT_MODE__TRACEPOINT before the table lookup.
 */
static const char *get_default_sort_order(struct evlist *evlist)
{
	/* indexed by enum sort_mode — keep in sync with it */
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

/*
 * Handle a '+'-prefixed --sort value: replace sort_order with
 * "<default order>,<user keys>".  A NULL or strict (no '+') order is
 * left untouched.
 */
static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.
 * Takes ownership of @str: frees it when a new string is returned;
 * returns NULL (without freeing @str) when allocation fails.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

/*
 * Prepend the overhead (and, with cumulation, overhead_children) keys
 * to @keys.  perf diff manages overhead columns itself, so it is skipped.
 */
static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

/* Build the effective sort-key list and register every key. */
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		/*
		 * NOTE(review): on allocation failure prefix_if_not_in()
		 * returns NULL without freeing its input, so the old 'str'
		 * leaks on this OOM path.
		 */
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

/* Set/clear the elide flag of the sort entry whose width index is @idx. */
void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

/*
 * A column can be elided when the user filtered it down to exactly one
 * value; that value is printed once as a "# name: value" header to @fp
 * (when non-NULL) instead of repeating it on every line.
 */
static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

/* Decide elision for width index @idx from the active symbol_conf filters. */
static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	/* the from/to variants only exist for branch-stack sorting */
	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

/* Compute elision for every sort entry, but never elide all of them. */
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

/*
 * Resolve the --fields key @tok (prefix match, case-insensitive) in all
 * dimension tables and add it as an output column.  Returns -ESRCH when
 * the key is unknown.
 */
int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

/* Tokenize @str on ", " and add every token as an output field. */
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

/* Clear the 'taken' flag in every dimension table so keys can be re-added. */
void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

/*
 * A "strict" order replaces the defaults; a '+'-prefixed order appends
 * to them.  NULL is not strict.
 */
bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

/* Parse the --fields string and register the requested output columns. */
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	/* skip the leading '+' of an append-style order */
	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

/*
 * Top-level entry point: set up sort keys, the optional parent key,
 * output fields, and cross-link sort keys with output columns.
 */
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

/* Undo setup_sorting() state so sorting can be configured again. */
void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

/* continuation-line indent for the wrapped sort-key help text */
#define INDENT (3*8 + 1)

/* Append " @str" to @sb, wrapping to a new indented line near column 75. */
static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

/* Append the names of @n sort dimensions from table @s. */
static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

/* Same as add_sort_string(), but for the hpp dimension table. */
static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

/*
 * Build the help string listing every known sort key after @prefix.
 * The returned buffer is detached from the strbuf; the caller owns it
 * (and, as used today, never frees it).
 */
const char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}