// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
#include <linux/kernel.h>
#include <linux/string.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

/*
 * Format a string, then replace every occurrence of the character given
 * with the:
 *
 *   -t, --field-separator
 *
 * option. That option selects a special separator character and disables
 * padding with spaces, so any occurrence of the separator inside symbol
 * names (and other output) is rewritten to a '.' character, making the
 * separator the only character that cannot appear in a field's contents.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}
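/*
 * Illustrative sketch of the behaviour above, assuming perf was invoked
 * with "-t ," so that symbol_conf.field_sep points at ",":
 *
 *	char buf[64];
 *
 *	repsep_snprintf(buf, sizeof(buf), "%s", "operator,()");
 *	// buf now holds "operator.()" - any ',' coming from the data is
 *	// rewritten, so the only commas in the output are the real column
 *	// separators emitted by the formatter itself.
 */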
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header	= "    Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};
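/*
 * A note for readers: the se_cmp/se_collapse/se_sort callbacks in this file
 * follow the usual negative/zero/positive contract, and many of them compare
 * "right" against "left" (see sort__thread_cmp and sort__comm_cmp above), so
 * the sign is intentionally inverted relative to a plain left-vs-right
 * comparison. When one side lacks the data being compared, cmp_null() keeps
 * the ordering stable:
 *
 *	cmp_null(NULL, NULL) ==  0	// both missing: equal
 *	cmp_null(NULL, ptr)  == -1	// only the first is missing
 *	cmp_null(ptr,  NULL) ==  1	// only the second is missing
 */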
/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ?
dso__symtab_origin(map->dso) : '!'; 294 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 295 BITS_PER_LONG / 4 + 2, ip, o); 296 } 297 298 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 299 if (sym && map) { 300 if (sym->type == STT_OBJECT) { 301 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 302 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 303 ip - map->unmap_ip(map, sym->start)); 304 } else { 305 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 306 width - ret, 307 sym->name); 308 if (sym->inlined) 309 ret += repsep_snprintf(bf + ret, size - ret, 310 " (inlined)"); 311 } 312 } else { 313 size_t len = BITS_PER_LONG / 4; 314 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 315 len, ip); 316 } 317 318 return ret; 319 } 320 321 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 322 size_t size, unsigned int width) 323 { 324 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 325 he->level, bf, size, width); 326 } 327 328 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 329 { 330 const char *sym = arg; 331 332 if (type != HIST_FILTER__SYMBOL) 333 return -1; 334 335 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 336 } 337 338 struct sort_entry sort_sym = { 339 .se_header = "Symbol", 340 .se_cmp = sort__sym_cmp, 341 .se_sort = sort__sym_sort, 342 .se_snprintf = hist_entry__sym_snprintf, 343 .se_filter = hist_entry__sym_filter, 344 .se_width_idx = HISTC_SYMBOL, 345 }; 346 347 /* --sort srcline */ 348 349 char *hist_entry__srcline(struct hist_entry *he) 350 { 351 return map__srcline(he->ms.map, he->ip, he->ms.sym); 352 } 353 354 static int64_t 355 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 356 { 357 if (!left->srcline) 358 left->srcline = hist_entry__srcline(left); 359 if (!right->srcline) 360 right->srcline = hist_entry__srcline(right); 361 362 return strcmp(right->srcline, left->srcline); 363 } 364 365 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 366 size_t size, unsigned int width) 367 { 368 if (!he->srcline) 369 he->srcline = hist_entry__srcline(he); 370 371 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 372 } 373 374 struct sort_entry sort_srcline = { 375 .se_header = "Source:Line", 376 .se_cmp = sort__srcline_cmp, 377 .se_snprintf = hist_entry__srcline_snprintf, 378 .se_width_idx = HISTC_SRCLINE, 379 }; 380 381 /* --sort srcline_from */ 382 383 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 384 { 385 return map__srcline(ams->map, ams->al_addr, ams->sym); 386 } 387 388 static int64_t 389 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 390 { 391 if (!left->branch_info->srcline_from) 392 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 393 394 if (!right->branch_info->srcline_from) 395 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 396 397 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 398 } 399 400 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 401 size_t size, unsigned int width) 402 { 403 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 404 } 405 406 struct sort_entry sort_srcline_from = { 407 .se_header = "From Source:Line", 408 .se_cmp = sort__srcline_from_cmp, 409 .se_snprintf = hist_entry__srcline_from_snprintf, 410 .se_width_idx = HISTC_SRCLINE_FROM, 411 }; 
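/*
 * Reader's note: the srcline, srcline_from/srcline_to and srcfile strings are
 * resolved lazily. The comparators (and most of the snprintf helpers) fill
 * the cached pointer on the hist_entry the first time it is needed, e.g.:
 *
 *	if (!he->srcline)
 *		he->srcline = hist_entry__srcline(he);
 *
 * so the relatively expensive source-line lookup runs at most once per entry,
 * no matter how often the entry is re-sorted or re-printed afterwards.
 */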
412 413 /* --sort srcline_to */ 414 415 static int64_t 416 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 417 { 418 if (!left->branch_info->srcline_to) 419 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 420 421 if (!right->branch_info->srcline_to) 422 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 423 424 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 425 } 426 427 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 428 size_t size, unsigned int width) 429 { 430 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 431 } 432 433 struct sort_entry sort_srcline_to = { 434 .se_header = "To Source:Line", 435 .se_cmp = sort__srcline_to_cmp, 436 .se_snprintf = hist_entry__srcline_to_snprintf, 437 .se_width_idx = HISTC_SRCLINE_TO, 438 }; 439 440 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 441 size_t size, unsigned int width) 442 { 443 444 struct symbol *sym = he->ms.sym; 445 struct annotation *notes; 446 double ipc = 0.0, coverage = 0.0; 447 char tmp[64]; 448 449 if (!sym) 450 return repsep_snprintf(bf, size, "%-*s", width, "-"); 451 452 notes = symbol__annotation(sym); 453 454 if (notes->hit_cycles) 455 ipc = notes->hit_insn / ((double)notes->hit_cycles); 456 457 if (notes->total_insn) { 458 coverage = notes->cover_insn * 100.0 / 459 ((double)notes->total_insn); 460 } 461 462 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 463 return repsep_snprintf(bf, size, "%-*s", width, tmp); 464 } 465 466 struct sort_entry sort_sym_ipc = { 467 .se_header = "IPC [IPC Coverage]", 468 .se_cmp = sort__sym_cmp, 469 .se_snprintf = hist_entry__sym_ipc_snprintf, 470 .se_width_idx = HISTC_SYMBOL_IPC, 471 }; 472 473 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 474 __maybe_unused, 475 char *bf, size_t size, 476 unsigned int width) 477 { 478 char tmp[64]; 479 480 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 481 return repsep_snprintf(bf, size, "%-*s", width, tmp); 482 } 483 484 struct sort_entry sort_sym_ipc_null = { 485 .se_header = "IPC [IPC Coverage]", 486 .se_cmp = sort__sym_cmp, 487 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 488 .se_width_idx = HISTC_SYMBOL_IPC, 489 }; 490 491 /* --sort srcfile */ 492 493 static char no_srcfile[1]; 494 495 static char *hist_entry__get_srcfile(struct hist_entry *e) 496 { 497 char *sf, *p; 498 struct map *map = e->ms.map; 499 500 if (!map) 501 return no_srcfile; 502 503 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 504 e->ms.sym, false, true, true, e->ip); 505 if (!strcmp(sf, SRCLINE_UNKNOWN)) 506 return no_srcfile; 507 p = strchr(sf, ':'); 508 if (p && *sf) { 509 *p = 0; 510 return sf; 511 } 512 free(sf); 513 return no_srcfile; 514 } 515 516 static int64_t 517 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 518 { 519 if (!left->srcfile) 520 left->srcfile = hist_entry__get_srcfile(left); 521 if (!right->srcfile) 522 right->srcfile = hist_entry__get_srcfile(right); 523 524 return strcmp(right->srcfile, left->srcfile); 525 } 526 527 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 528 size_t size, unsigned int width) 529 { 530 if (!he->srcfile) 531 he->srcfile = hist_entry__get_srcfile(he); 532 533 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 534 } 535 536 struct sort_entry sort_srcfile = { 537 .se_header = "Source File", 538 .se_cmp = 
sort__srcfile_cmp, 539 .se_snprintf = hist_entry__srcfile_snprintf, 540 .se_width_idx = HISTC_SRCFILE, 541 }; 542 543 /* --sort parent */ 544 545 static int64_t 546 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 547 { 548 struct symbol *sym_l = left->parent; 549 struct symbol *sym_r = right->parent; 550 551 if (!sym_l || !sym_r) 552 return cmp_null(sym_l, sym_r); 553 554 return strcmp(sym_r->name, sym_l->name); 555 } 556 557 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 558 size_t size, unsigned int width) 559 { 560 return repsep_snprintf(bf, size, "%-*.*s", width, width, 561 he->parent ? he->parent->name : "[other]"); 562 } 563 564 struct sort_entry sort_parent = { 565 .se_header = "Parent symbol", 566 .se_cmp = sort__parent_cmp, 567 .se_snprintf = hist_entry__parent_snprintf, 568 .se_width_idx = HISTC_PARENT, 569 }; 570 571 /* --sort cpu */ 572 573 static int64_t 574 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 575 { 576 return right->cpu - left->cpu; 577 } 578 579 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 580 size_t size, unsigned int width) 581 { 582 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 583 } 584 585 struct sort_entry sort_cpu = { 586 .se_header = "CPU", 587 .se_cmp = sort__cpu_cmp, 588 .se_snprintf = hist_entry__cpu_snprintf, 589 .se_width_idx = HISTC_CPU, 590 }; 591 592 /* --sort cgroup_id */ 593 594 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 595 { 596 return (int64_t)(right_dev - left_dev); 597 } 598 599 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 600 { 601 return (int64_t)(right_ino - left_ino); 602 } 603 604 static int64_t 605 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 606 { 607 int64_t ret; 608 609 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 610 if (ret != 0) 611 return ret; 612 613 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 614 left->cgroup_id.ino); 615 } 616 617 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 618 char *bf, size_t size, 619 unsigned int width __maybe_unused) 620 { 621 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 622 he->cgroup_id.ino); 623 } 624 625 struct sort_entry sort_cgroup_id = { 626 .se_header = "cgroup id (dev/inode)", 627 .se_cmp = sort__cgroup_id_cmp, 628 .se_snprintf = hist_entry__cgroup_id_snprintf, 629 .se_width_idx = HISTC_CGROUP_ID, 630 }; 631 632 /* --sort socket */ 633 634 static int64_t 635 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 636 { 637 return right->socket - left->socket; 638 } 639 640 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 641 size_t size, unsigned int width) 642 { 643 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 644 } 645 646 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 647 { 648 int sk = *(const int *)arg; 649 650 if (type != HIST_FILTER__SOCKET) 651 return -1; 652 653 return sk >= 0 && he->socket != sk; 654 } 655 656 struct sort_entry sort_socket = { 657 .se_header = "Socket", 658 .se_cmp = sort__socket_cmp, 659 .se_snprintf = hist_entry__socket_snprintf, 660 .se_filter = hist_entry__socket_filter, 661 .se_width_idx = HISTC_SOCKET, 662 }; 663 664 /* --sort time */ 665 666 static int64_t 667 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 668 { 669 return right->time - left->time; 670 } 671 672 static int 
hist_entry__time_snprintf(struct hist_entry *he, char *bf, 673 size_t size, unsigned int width) 674 { 675 char he_time[32]; 676 677 if (symbol_conf.nanosecs) 678 timestamp__scnprintf_nsec(he->time, he_time, 679 sizeof(he_time)); 680 else 681 timestamp__scnprintf_usec(he->time, he_time, 682 sizeof(he_time)); 683 684 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 685 } 686 687 struct sort_entry sort_time = { 688 .se_header = "Time", 689 .se_cmp = sort__time_cmp, 690 .se_snprintf = hist_entry__time_snprintf, 691 .se_width_idx = HISTC_TIME, 692 }; 693 694 /* --sort trace */ 695 696 static char *get_trace_output(struct hist_entry *he) 697 { 698 struct trace_seq seq; 699 struct evsel *evsel; 700 struct tep_record rec = { 701 .data = he->raw_data, 702 .size = he->raw_size, 703 }; 704 705 evsel = hists_to_evsel(he->hists); 706 707 trace_seq_init(&seq); 708 if (symbol_conf.raw_trace) { 709 tep_print_fields(&seq, he->raw_data, he->raw_size, 710 evsel->tp_format); 711 } else { 712 tep_event_info(&seq, evsel->tp_format, &rec); 713 } 714 /* 715 * Trim the buffer, it starts at 4KB and we're not going to 716 * add anything more to this buffer. 717 */ 718 return realloc(seq.buffer, seq.len + 1); 719 } 720 721 static int64_t 722 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 723 { 724 struct evsel *evsel; 725 726 evsel = hists_to_evsel(left->hists); 727 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 728 return 0; 729 730 if (left->trace_output == NULL) 731 left->trace_output = get_trace_output(left); 732 if (right->trace_output == NULL) 733 right->trace_output = get_trace_output(right); 734 735 return strcmp(right->trace_output, left->trace_output); 736 } 737 738 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 739 size_t size, unsigned int width) 740 { 741 struct evsel *evsel; 742 743 evsel = hists_to_evsel(he->hists); 744 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 745 return scnprintf(bf, size, "%-.*s", width, "N/A"); 746 747 if (he->trace_output == NULL) 748 he->trace_output = get_trace_output(he); 749 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 750 } 751 752 struct sort_entry sort_trace = { 753 .se_header = "Trace output", 754 .se_cmp = sort__trace_cmp, 755 .se_snprintf = hist_entry__trace_snprintf, 756 .se_width_idx = HISTC_TRACE, 757 }; 758 759 /* sort keys for branch stacks */ 760 761 static int64_t 762 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 763 { 764 if (!left->branch_info || !right->branch_info) 765 return cmp_null(left->branch_info, right->branch_info); 766 767 return _sort__dso_cmp(left->branch_info->from.map, 768 right->branch_info->from.map); 769 } 770 771 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 772 size_t size, unsigned int width) 773 { 774 if (he->branch_info) 775 return _hist_entry__dso_snprintf(he->branch_info->from.map, 776 bf, size, width); 777 else 778 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 779 } 780 781 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 782 const void *arg) 783 { 784 const struct dso *dso = arg; 785 786 if (type != HIST_FILTER__DSO) 787 return -1; 788 789 return dso && (!he->branch_info || !he->branch_info->from.map || 790 he->branch_info->from.map->dso != dso); 791 } 792 793 static int64_t 794 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 795 { 796 if (!left->branch_info || !right->branch_info) 797 return cmp_null(left->branch_info, 
right->branch_info); 798 799 return _sort__dso_cmp(left->branch_info->to.map, 800 right->branch_info->to.map); 801 } 802 803 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 804 size_t size, unsigned int width) 805 { 806 if (he->branch_info) 807 return _hist_entry__dso_snprintf(he->branch_info->to.map, 808 bf, size, width); 809 else 810 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 811 } 812 813 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 814 const void *arg) 815 { 816 const struct dso *dso = arg; 817 818 if (type != HIST_FILTER__DSO) 819 return -1; 820 821 return dso && (!he->branch_info || !he->branch_info->to.map || 822 he->branch_info->to.map->dso != dso); 823 } 824 825 static int64_t 826 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 827 { 828 struct addr_map_symbol *from_l = &left->branch_info->from; 829 struct addr_map_symbol *from_r = &right->branch_info->from; 830 831 if (!left->branch_info || !right->branch_info) 832 return cmp_null(left->branch_info, right->branch_info); 833 834 from_l = &left->branch_info->from; 835 from_r = &right->branch_info->from; 836 837 if (!from_l->sym && !from_r->sym) 838 return _sort__addr_cmp(from_l->addr, from_r->addr); 839 840 return _sort__sym_cmp(from_l->sym, from_r->sym); 841 } 842 843 static int64_t 844 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 845 { 846 struct addr_map_symbol *to_l, *to_r; 847 848 if (!left->branch_info || !right->branch_info) 849 return cmp_null(left->branch_info, right->branch_info); 850 851 to_l = &left->branch_info->to; 852 to_r = &right->branch_info->to; 853 854 if (!to_l->sym && !to_r->sym) 855 return _sort__addr_cmp(to_l->addr, to_r->addr); 856 857 return _sort__sym_cmp(to_l->sym, to_r->sym); 858 } 859 860 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 861 size_t size, unsigned int width) 862 { 863 if (he->branch_info) { 864 struct addr_map_symbol *from = &he->branch_info->from; 865 866 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 867 he->level, bf, size, width); 868 } 869 870 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 871 } 872 873 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 874 size_t size, unsigned int width) 875 { 876 if (he->branch_info) { 877 struct addr_map_symbol *to = &he->branch_info->to; 878 879 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 880 he->level, bf, size, width); 881 } 882 883 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 884 } 885 886 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 887 const void *arg) 888 { 889 const char *sym = arg; 890 891 if (type != HIST_FILTER__SYMBOL) 892 return -1; 893 894 return sym && !(he->branch_info && he->branch_info->from.sym && 895 strstr(he->branch_info->from.sym->name, sym)); 896 } 897 898 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 899 const void *arg) 900 { 901 const char *sym = arg; 902 903 if (type != HIST_FILTER__SYMBOL) 904 return -1; 905 906 return sym && !(he->branch_info && he->branch_info->to.sym && 907 strstr(he->branch_info->to.sym->name, sym)); 908 } 909 910 struct sort_entry sort_dso_from = { 911 .se_header = "Source Shared Object", 912 .se_cmp = sort__dso_from_cmp, 913 .se_snprintf = hist_entry__dso_from_snprintf, 914 .se_filter = hist_entry__dso_from_filter, 915 .se_width_idx = HISTC_DSO_FROM, 916 }; 917 918 struct sort_entry sort_dso_to = { 919 
.se_header = "Target Shared Object", 920 .se_cmp = sort__dso_to_cmp, 921 .se_snprintf = hist_entry__dso_to_snprintf, 922 .se_filter = hist_entry__dso_to_filter, 923 .se_width_idx = HISTC_DSO_TO, 924 }; 925 926 struct sort_entry sort_sym_from = { 927 .se_header = "Source Symbol", 928 .se_cmp = sort__sym_from_cmp, 929 .se_snprintf = hist_entry__sym_from_snprintf, 930 .se_filter = hist_entry__sym_from_filter, 931 .se_width_idx = HISTC_SYMBOL_FROM, 932 }; 933 934 struct sort_entry sort_sym_to = { 935 .se_header = "Target Symbol", 936 .se_cmp = sort__sym_to_cmp, 937 .se_snprintf = hist_entry__sym_to_snprintf, 938 .se_filter = hist_entry__sym_to_filter, 939 .se_width_idx = HISTC_SYMBOL_TO, 940 }; 941 942 static int64_t 943 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 944 { 945 unsigned char mp, p; 946 947 if (!left->branch_info || !right->branch_info) 948 return cmp_null(left->branch_info, right->branch_info); 949 950 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 951 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 952 return mp || p; 953 } 954 955 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 956 size_t size, unsigned int width){ 957 static const char *out = "N/A"; 958 959 if (he->branch_info) { 960 if (he->branch_info->flags.predicted) 961 out = "N"; 962 else if (he->branch_info->flags.mispred) 963 out = "Y"; 964 } 965 966 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 967 } 968 969 static int64_t 970 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 971 { 972 if (!left->branch_info || !right->branch_info) 973 return cmp_null(left->branch_info, right->branch_info); 974 975 return left->branch_info->flags.cycles - 976 right->branch_info->flags.cycles; 977 } 978 979 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 980 size_t size, unsigned int width) 981 { 982 if (!he->branch_info) 983 return scnprintf(bf, size, "%-.*s", width, "N/A"); 984 if (he->branch_info->flags.cycles == 0) 985 return repsep_snprintf(bf, size, "%-*s", width, "-"); 986 return repsep_snprintf(bf, size, "%-*hd", width, 987 he->branch_info->flags.cycles); 988 } 989 990 struct sort_entry sort_cycles = { 991 .se_header = "Basic Block Cycles", 992 .se_cmp = sort__cycles_cmp, 993 .se_snprintf = hist_entry__cycles_snprintf, 994 .se_width_idx = HISTC_CYCLES, 995 }; 996 997 /* --sort daddr_sym */ 998 int64_t 999 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1000 { 1001 uint64_t l = 0, r = 0; 1002 1003 if (left->mem_info) 1004 l = left->mem_info->daddr.addr; 1005 if (right->mem_info) 1006 r = right->mem_info->daddr.addr; 1007 1008 return (int64_t)(r - l); 1009 } 1010 1011 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1012 size_t size, unsigned int width) 1013 { 1014 uint64_t addr = 0; 1015 struct map *map = NULL; 1016 struct symbol *sym = NULL; 1017 1018 if (he->mem_info) { 1019 addr = he->mem_info->daddr.addr; 1020 map = he->mem_info->daddr.map; 1021 sym = he->mem_info->daddr.sym; 1022 } 1023 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 1024 width); 1025 } 1026 1027 int64_t 1028 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1029 { 1030 uint64_t l = 0, r = 0; 1031 1032 if (left->mem_info) 1033 l = left->mem_info->iaddr.addr; 1034 if (right->mem_info) 1035 r = right->mem_info->iaddr.addr; 1036 1037 return (int64_t)(r - l); 1038 } 1039 1040 static int 
hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1041 size_t size, unsigned int width) 1042 { 1043 uint64_t addr = 0; 1044 struct map *map = NULL; 1045 struct symbol *sym = NULL; 1046 1047 if (he->mem_info) { 1048 addr = he->mem_info->iaddr.addr; 1049 map = he->mem_info->iaddr.map; 1050 sym = he->mem_info->iaddr.sym; 1051 } 1052 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 1053 width); 1054 } 1055 1056 static int64_t 1057 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1058 { 1059 struct map *map_l = NULL; 1060 struct map *map_r = NULL; 1061 1062 if (left->mem_info) 1063 map_l = left->mem_info->daddr.map; 1064 if (right->mem_info) 1065 map_r = right->mem_info->daddr.map; 1066 1067 return _sort__dso_cmp(map_l, map_r); 1068 } 1069 1070 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1071 size_t size, unsigned int width) 1072 { 1073 struct map *map = NULL; 1074 1075 if (he->mem_info) 1076 map = he->mem_info->daddr.map; 1077 1078 return _hist_entry__dso_snprintf(map, bf, size, width); 1079 } 1080 1081 static int64_t 1082 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1083 { 1084 union perf_mem_data_src data_src_l; 1085 union perf_mem_data_src data_src_r; 1086 1087 if (left->mem_info) 1088 data_src_l = left->mem_info->data_src; 1089 else 1090 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1091 1092 if (right->mem_info) 1093 data_src_r = right->mem_info->data_src; 1094 else 1095 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1096 1097 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1098 } 1099 1100 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1101 size_t size, unsigned int width) 1102 { 1103 char out[10]; 1104 1105 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1106 return repsep_snprintf(bf, size, "%.*s", width, out); 1107 } 1108 1109 static int64_t 1110 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1111 { 1112 union perf_mem_data_src data_src_l; 1113 union perf_mem_data_src data_src_r; 1114 1115 if (left->mem_info) 1116 data_src_l = left->mem_info->data_src; 1117 else 1118 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1119 1120 if (right->mem_info) 1121 data_src_r = right->mem_info->data_src; 1122 else 1123 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1124 1125 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1126 } 1127 1128 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1129 size_t size, unsigned int width) 1130 { 1131 char out[64]; 1132 1133 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1134 return repsep_snprintf(bf, size, "%-*s", width, out); 1135 } 1136 1137 static int64_t 1138 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1139 { 1140 union perf_mem_data_src data_src_l; 1141 union perf_mem_data_src data_src_r; 1142 1143 if (left->mem_info) 1144 data_src_l = left->mem_info->data_src; 1145 else 1146 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1147 1148 if (right->mem_info) 1149 data_src_r = right->mem_info->data_src; 1150 else 1151 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1152 1153 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1154 } 1155 1156 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1157 size_t size, unsigned int width) 1158 { 1159 char out[64]; 1160 1161 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1162 return repsep_snprintf(bf, size, "%-*s", width, out); 1163 } 1164 1165 static int64_t 1166 sort__snoop_cmp(struct hist_entry *left, 
struct hist_entry *right) 1167 { 1168 union perf_mem_data_src data_src_l; 1169 union perf_mem_data_src data_src_r; 1170 1171 if (left->mem_info) 1172 data_src_l = left->mem_info->data_src; 1173 else 1174 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1175 1176 if (right->mem_info) 1177 data_src_r = right->mem_info->data_src; 1178 else 1179 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1180 1181 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1182 } 1183 1184 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1185 size_t size, unsigned int width) 1186 { 1187 char out[64]; 1188 1189 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1190 return repsep_snprintf(bf, size, "%-*s", width, out); 1191 } 1192 1193 int64_t 1194 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1195 { 1196 u64 l, r; 1197 struct map *l_map, *r_map; 1198 1199 if (!left->mem_info) return -1; 1200 if (!right->mem_info) return 1; 1201 1202 /* group event types together */ 1203 if (left->cpumode > right->cpumode) return -1; 1204 if (left->cpumode < right->cpumode) return 1; 1205 1206 l_map = left->mem_info->daddr.map; 1207 r_map = right->mem_info->daddr.map; 1208 1209 /* if both are NULL, jump to sort on al_addr instead */ 1210 if (!l_map && !r_map) 1211 goto addr; 1212 1213 if (!l_map) return -1; 1214 if (!r_map) return 1; 1215 1216 if (l_map->maj > r_map->maj) return -1; 1217 if (l_map->maj < r_map->maj) return 1; 1218 1219 if (l_map->min > r_map->min) return -1; 1220 if (l_map->min < r_map->min) return 1; 1221 1222 if (l_map->ino > r_map->ino) return -1; 1223 if (l_map->ino < r_map->ino) return 1; 1224 1225 if (l_map->ino_generation > r_map->ino_generation) return -1; 1226 if (l_map->ino_generation < r_map->ino_generation) return 1; 1227 1228 /* 1229 * Addresses with no major/minor numbers are assumed to be 1230 * anonymous in userspace. Sort those on pid then address. 1231 * 1232 * The kernel and non-zero major/minor mapped areas are 1233 * assumed to be unity mapped. Sort those on address. 
1234 */ 1235 1236 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1237 (!(l_map->flags & MAP_SHARED)) && 1238 !l_map->maj && !l_map->min && !l_map->ino && 1239 !l_map->ino_generation) { 1240 /* userspace anonymous */ 1241 1242 if (left->thread->pid_ > right->thread->pid_) return -1; 1243 if (left->thread->pid_ < right->thread->pid_) return 1; 1244 } 1245 1246 addr: 1247 /* al_addr does all the right addr - start + offset calculations */ 1248 l = cl_address(left->mem_info->daddr.al_addr); 1249 r = cl_address(right->mem_info->daddr.al_addr); 1250 1251 if (l > r) return -1; 1252 if (l < r) return 1; 1253 1254 return 0; 1255 } 1256 1257 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1258 size_t size, unsigned int width) 1259 { 1260 1261 uint64_t addr = 0; 1262 struct map *map = NULL; 1263 struct symbol *sym = NULL; 1264 char level = he->level; 1265 1266 if (he->mem_info) { 1267 addr = cl_address(he->mem_info->daddr.al_addr); 1268 map = he->mem_info->daddr.map; 1269 sym = he->mem_info->daddr.sym; 1270 1271 /* print [s] for shared data mmaps */ 1272 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1273 map && !(map->prot & PROT_EXEC) && 1274 (map->flags & MAP_SHARED) && 1275 (map->maj || map->min || map->ino || 1276 map->ino_generation)) 1277 level = 's'; 1278 else if (!map) 1279 level = 'X'; 1280 } 1281 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, 1282 width); 1283 } 1284 1285 struct sort_entry sort_mispredict = { 1286 .se_header = "Branch Mispredicted", 1287 .se_cmp = sort__mispredict_cmp, 1288 .se_snprintf = hist_entry__mispredict_snprintf, 1289 .se_width_idx = HISTC_MISPREDICT, 1290 }; 1291 1292 static u64 he_weight(struct hist_entry *he) 1293 { 1294 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; 1295 } 1296 1297 static int64_t 1298 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1299 { 1300 return he_weight(left) - he_weight(right); 1301 } 1302 1303 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1304 size_t size, unsigned int width) 1305 { 1306 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1307 } 1308 1309 struct sort_entry sort_local_weight = { 1310 .se_header = "Local Weight", 1311 .se_cmp = sort__local_weight_cmp, 1312 .se_snprintf = hist_entry__local_weight_snprintf, 1313 .se_width_idx = HISTC_LOCAL_WEIGHT, 1314 }; 1315 1316 static int64_t 1317 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1318 { 1319 return left->stat.weight - right->stat.weight; 1320 } 1321 1322 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1323 size_t size, unsigned int width) 1324 { 1325 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1326 } 1327 1328 struct sort_entry sort_global_weight = { 1329 .se_header = "Weight", 1330 .se_cmp = sort__global_weight_cmp, 1331 .se_snprintf = hist_entry__global_weight_snprintf, 1332 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1333 }; 1334 1335 struct sort_entry sort_mem_daddr_sym = { 1336 .se_header = "Data Symbol", 1337 .se_cmp = sort__daddr_cmp, 1338 .se_snprintf = hist_entry__daddr_snprintf, 1339 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1340 }; 1341 1342 struct sort_entry sort_mem_iaddr_sym = { 1343 .se_header = "Code Symbol", 1344 .se_cmp = sort__iaddr_cmp, 1345 .se_snprintf = hist_entry__iaddr_snprintf, 1346 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1347 }; 1348 1349 struct sort_entry sort_mem_daddr_dso = { 1350 .se_header = "Data Object", 1351 .se_cmp = 
sort__dso_daddr_cmp, 1352 .se_snprintf = hist_entry__dso_daddr_snprintf, 1353 .se_width_idx = HISTC_MEM_DADDR_DSO, 1354 }; 1355 1356 struct sort_entry sort_mem_locked = { 1357 .se_header = "Locked", 1358 .se_cmp = sort__locked_cmp, 1359 .se_snprintf = hist_entry__locked_snprintf, 1360 .se_width_idx = HISTC_MEM_LOCKED, 1361 }; 1362 1363 struct sort_entry sort_mem_tlb = { 1364 .se_header = "TLB access", 1365 .se_cmp = sort__tlb_cmp, 1366 .se_snprintf = hist_entry__tlb_snprintf, 1367 .se_width_idx = HISTC_MEM_TLB, 1368 }; 1369 1370 struct sort_entry sort_mem_lvl = { 1371 .se_header = "Memory access", 1372 .se_cmp = sort__lvl_cmp, 1373 .se_snprintf = hist_entry__lvl_snprintf, 1374 .se_width_idx = HISTC_MEM_LVL, 1375 }; 1376 1377 struct sort_entry sort_mem_snoop = { 1378 .se_header = "Snoop", 1379 .se_cmp = sort__snoop_cmp, 1380 .se_snprintf = hist_entry__snoop_snprintf, 1381 .se_width_idx = HISTC_MEM_SNOOP, 1382 }; 1383 1384 struct sort_entry sort_mem_dcacheline = { 1385 .se_header = "Data Cacheline", 1386 .se_cmp = sort__dcacheline_cmp, 1387 .se_snprintf = hist_entry__dcacheline_snprintf, 1388 .se_width_idx = HISTC_MEM_DCACHELINE, 1389 }; 1390 1391 static int64_t 1392 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1393 { 1394 uint64_t l = 0, r = 0; 1395 1396 if (left->mem_info) 1397 l = left->mem_info->daddr.phys_addr; 1398 if (right->mem_info) 1399 r = right->mem_info->daddr.phys_addr; 1400 1401 return (int64_t)(r - l); 1402 } 1403 1404 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1405 size_t size, unsigned int width) 1406 { 1407 uint64_t addr = 0; 1408 size_t ret = 0; 1409 size_t len = BITS_PER_LONG / 4; 1410 1411 addr = he->mem_info->daddr.phys_addr; 1412 1413 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1414 1415 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1416 1417 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1418 1419 if (ret > width) 1420 bf[width] = '\0'; 1421 1422 return width; 1423 } 1424 1425 struct sort_entry sort_mem_phys_daddr = { 1426 .se_header = "Data Physical Address", 1427 .se_cmp = sort__phys_daddr_cmp, 1428 .se_snprintf = hist_entry__phys_daddr_snprintf, 1429 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1430 }; 1431 1432 static int64_t 1433 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1434 { 1435 if (!left->branch_info || !right->branch_info) 1436 return cmp_null(left->branch_info, right->branch_info); 1437 1438 return left->branch_info->flags.abort != 1439 right->branch_info->flags.abort; 1440 } 1441 1442 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1443 size_t size, unsigned int width) 1444 { 1445 static const char *out = "N/A"; 1446 1447 if (he->branch_info) { 1448 if (he->branch_info->flags.abort) 1449 out = "A"; 1450 else 1451 out = "."; 1452 } 1453 1454 return repsep_snprintf(bf, size, "%-*s", width, out); 1455 } 1456 1457 struct sort_entry sort_abort = { 1458 .se_header = "Transaction abort", 1459 .se_cmp = sort__abort_cmp, 1460 .se_snprintf = hist_entry__abort_snprintf, 1461 .se_width_idx = HISTC_ABORT, 1462 }; 1463 1464 static int64_t 1465 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1466 { 1467 if (!left->branch_info || !right->branch_info) 1468 return cmp_null(left->branch_info, right->branch_info); 1469 1470 return left->branch_info->flags.in_tx != 1471 right->branch_info->flags.in_tx; 1472 } 1473 1474 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 
1475 size_t size, unsigned int width) 1476 { 1477 static const char *out = "N/A"; 1478 1479 if (he->branch_info) { 1480 if (he->branch_info->flags.in_tx) 1481 out = "T"; 1482 else 1483 out = "."; 1484 } 1485 1486 return repsep_snprintf(bf, size, "%-*s", width, out); 1487 } 1488 1489 struct sort_entry sort_in_tx = { 1490 .se_header = "Branch in transaction", 1491 .se_cmp = sort__in_tx_cmp, 1492 .se_snprintf = hist_entry__in_tx_snprintf, 1493 .se_width_idx = HISTC_IN_TX, 1494 }; 1495 1496 static int64_t 1497 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1498 { 1499 return left->transaction - right->transaction; 1500 } 1501 1502 static inline char *add_str(char *p, const char *str) 1503 { 1504 strcpy(p, str); 1505 return p + strlen(str); 1506 } 1507 1508 static struct txbit { 1509 unsigned flag; 1510 const char *name; 1511 int skip_for_len; 1512 } txbits[] = { 1513 { PERF_TXN_ELISION, "EL ", 0 }, 1514 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1515 { PERF_TXN_SYNC, "SYNC ", 1 }, 1516 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1517 { PERF_TXN_RETRY, "RETRY ", 0 }, 1518 { PERF_TXN_CONFLICT, "CON ", 0 }, 1519 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1520 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1521 { 0, NULL, 0 } 1522 }; 1523 1524 int hist_entry__transaction_len(void) 1525 { 1526 int i; 1527 int len = 0; 1528 1529 for (i = 0; txbits[i].name; i++) { 1530 if (!txbits[i].skip_for_len) 1531 len += strlen(txbits[i].name); 1532 } 1533 len += 4; /* :XX<space> */ 1534 return len; 1535 } 1536 1537 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1538 size_t size, unsigned int width) 1539 { 1540 u64 t = he->transaction; 1541 char buf[128]; 1542 char *p = buf; 1543 int i; 1544 1545 buf[0] = 0; 1546 for (i = 0; txbits[i].name; i++) 1547 if (txbits[i].flag & t) 1548 p = add_str(p, txbits[i].name); 1549 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1550 p = add_str(p, "NEITHER "); 1551 if (t & PERF_TXN_ABORT_MASK) { 1552 sprintf(p, ":%" PRIx64, 1553 (t & PERF_TXN_ABORT_MASK) >> 1554 PERF_TXN_ABORT_SHIFT); 1555 p += strlen(p); 1556 } 1557 1558 return repsep_snprintf(bf, size, "%-*s", width, buf); 1559 } 1560 1561 struct sort_entry sort_transaction = { 1562 .se_header = "Transaction ", 1563 .se_cmp = sort__transaction_cmp, 1564 .se_snprintf = hist_entry__transaction_snprintf, 1565 .se_width_idx = HISTC_TRANSACTION, 1566 }; 1567 1568 /* --sort symbol_size */ 1569 1570 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1571 { 1572 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1573 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 1574 1575 return size_l < size_r ? -1 : 1576 size_l == size_r ? 
0 : 1; 1577 } 1578 1579 static int64_t 1580 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1581 { 1582 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1583 } 1584 1585 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1586 size_t bf_size, unsigned int width) 1587 { 1588 if (sym) 1589 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1590 1591 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1592 } 1593 1594 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1595 size_t size, unsigned int width) 1596 { 1597 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1598 } 1599 1600 struct sort_entry sort_sym_size = { 1601 .se_header = "Symbol size", 1602 .se_cmp = sort__sym_size_cmp, 1603 .se_snprintf = hist_entry__sym_size_snprintf, 1604 .se_width_idx = HISTC_SYM_SIZE, 1605 }; 1606 1607 /* --sort dso_size */ 1608 1609 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 1610 { 1611 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 1612 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 1613 1614 return size_l < size_r ? -1 : 1615 size_l == size_r ? 0 : 1; 1616 } 1617 1618 static int64_t 1619 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 1620 { 1621 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 1622 } 1623 1624 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 1625 size_t bf_size, unsigned int width) 1626 { 1627 if (map && map->dso) 1628 return repsep_snprintf(bf, bf_size, "%*d", width, 1629 map__size(map)); 1630 1631 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1632 } 1633 1634 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 1635 size_t size, unsigned int width) 1636 { 1637 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 1638 } 1639 1640 struct sort_entry sort_dso_size = { 1641 .se_header = "DSO size", 1642 .se_cmp = sort__dso_size_cmp, 1643 .se_snprintf = hist_entry__dso_size_snprintf, 1644 .se_width_idx = HISTC_DSO_SIZE, 1645 }; 1646 1647 1648 struct sort_dimension { 1649 const char *name; 1650 struct sort_entry *entry; 1651 int taken; 1652 }; 1653 1654 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1655 1656 static struct sort_dimension common_sort_dimensions[] = { 1657 DIM(SORT_PID, "pid", sort_thread), 1658 DIM(SORT_COMM, "comm", sort_comm), 1659 DIM(SORT_DSO, "dso", sort_dso), 1660 DIM(SORT_SYM, "symbol", sort_sym), 1661 DIM(SORT_PARENT, "parent", sort_parent), 1662 DIM(SORT_CPU, "cpu", sort_cpu), 1663 DIM(SORT_SOCKET, "socket", sort_socket), 1664 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1665 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1666 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1667 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1668 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1669 DIM(SORT_TRACE, "trace", sort_trace), 1670 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1671 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1672 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1673 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 1674 DIM(SORT_TIME, "time", sort_time), 1675 }; 1676 1677 #undef DIM 1678 1679 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1680 1681 static struct sort_dimension bstack_sort_dimensions[] = { 1682 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1683 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1684 
DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1685 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1686 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1687 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1688 DIM(SORT_ABORT, "abort", sort_abort), 1689 DIM(SORT_CYCLES, "cycles", sort_cycles), 1690 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1691 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1692 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 1693 }; 1694 1695 #undef DIM 1696 1697 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1698 1699 static struct sort_dimension memory_sort_dimensions[] = { 1700 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1701 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1702 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1703 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1704 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1705 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1706 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1707 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1708 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 1709 }; 1710 1711 #undef DIM 1712 1713 struct hpp_dimension { 1714 const char *name; 1715 struct perf_hpp_fmt *fmt; 1716 int taken; 1717 }; 1718 1719 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1720 1721 static struct hpp_dimension hpp_sort_dimensions[] = { 1722 DIM(PERF_HPP__OVERHEAD, "overhead"), 1723 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1724 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1725 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1726 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1727 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1728 DIM(PERF_HPP__SAMPLES, "sample"), 1729 DIM(PERF_HPP__PERIOD, "period"), 1730 }; 1731 1732 #undef DIM 1733 1734 struct hpp_sort_entry { 1735 struct perf_hpp_fmt hpp; 1736 struct sort_entry *se; 1737 }; 1738 1739 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1740 { 1741 struct hpp_sort_entry *hse; 1742 1743 if (!perf_hpp__is_sort_entry(fmt)) 1744 return; 1745 1746 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1747 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1748 } 1749 1750 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1751 struct hists *hists, int line __maybe_unused, 1752 int *span __maybe_unused) 1753 { 1754 struct hpp_sort_entry *hse; 1755 size_t len = fmt->user_len; 1756 1757 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1758 1759 if (!len) 1760 len = hists__col_len(hists, hse->se->se_width_idx); 1761 1762 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1763 } 1764 1765 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1766 struct perf_hpp *hpp __maybe_unused, 1767 struct hists *hists) 1768 { 1769 struct hpp_sort_entry *hse; 1770 size_t len = fmt->user_len; 1771 1772 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1773 1774 if (!len) 1775 len = hists__col_len(hists, hse->se->se_width_idx); 1776 1777 return len; 1778 } 1779 1780 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1781 struct hist_entry *he) 1782 { 1783 struct hpp_sort_entry *hse; 1784 size_t len = fmt->user_len; 1785 1786 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1787 1788 if (!len) 1789 len = hists__col_len(he->hists, hse->se->se_width_idx); 1790 1791 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 
1792 } 1793 1794 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1795 struct hist_entry *a, struct hist_entry *b) 1796 { 1797 struct hpp_sort_entry *hse; 1798 1799 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1800 return hse->se->se_cmp(a, b); 1801 } 1802 1803 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1804 struct hist_entry *a, struct hist_entry *b) 1805 { 1806 struct hpp_sort_entry *hse; 1807 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1808 1809 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1810 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1811 return collapse_fn(a, b); 1812 } 1813 1814 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1815 struct hist_entry *a, struct hist_entry *b) 1816 { 1817 struct hpp_sort_entry *hse; 1818 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1819 1820 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1821 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1822 return sort_fn(a, b); 1823 } 1824 1825 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1826 { 1827 return format->header == __sort__hpp_header; 1828 } 1829 1830 #define MK_SORT_ENTRY_CHK(key) \ 1831 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1832 { \ 1833 struct hpp_sort_entry *hse; \ 1834 \ 1835 if (!perf_hpp__is_sort_entry(fmt)) \ 1836 return false; \ 1837 \ 1838 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1839 return hse->se == &sort_ ## key ; \ 1840 } 1841 1842 MK_SORT_ENTRY_CHK(trace) 1843 MK_SORT_ENTRY_CHK(srcline) 1844 MK_SORT_ENTRY_CHK(srcfile) 1845 MK_SORT_ENTRY_CHK(thread) 1846 MK_SORT_ENTRY_CHK(comm) 1847 MK_SORT_ENTRY_CHK(dso) 1848 MK_SORT_ENTRY_CHK(sym) 1849 1850 1851 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1852 { 1853 struct hpp_sort_entry *hse_a; 1854 struct hpp_sort_entry *hse_b; 1855 1856 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1857 return false; 1858 1859 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1860 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1861 1862 return hse_a->se == hse_b->se; 1863 } 1864 1865 static void hse_free(struct perf_hpp_fmt *fmt) 1866 { 1867 struct hpp_sort_entry *hse; 1868 1869 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1870 free(hse); 1871 } 1872 1873 static struct hpp_sort_entry * 1874 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1875 { 1876 struct hpp_sort_entry *hse; 1877 1878 hse = malloc(sizeof(*hse)); 1879 if (hse == NULL) { 1880 pr_err("Memory allocation failed\n"); 1881 return NULL; 1882 } 1883 1884 hse->se = sd->entry; 1885 hse->hpp.name = sd->entry->se_header; 1886 hse->hpp.header = __sort__hpp_header; 1887 hse->hpp.width = __sort__hpp_width; 1888 hse->hpp.entry = __sort__hpp_entry; 1889 hse->hpp.color = NULL; 1890 1891 hse->hpp.cmp = __sort__hpp_cmp; 1892 hse->hpp.collapse = __sort__hpp_collapse; 1893 hse->hpp.sort = __sort__hpp_sort; 1894 hse->hpp.equal = __sort__hpp_equal; 1895 hse->hpp.free = hse_free; 1896 1897 INIT_LIST_HEAD(&hse->hpp.list); 1898 INIT_LIST_HEAD(&hse->hpp.sort_list); 1899 hse->hpp.elide = false; 1900 hse->hpp.len = 0; 1901 hse->hpp.user_len = 0; 1902 hse->hpp.level = level; 1903 1904 return hse; 1905 } 1906 1907 static void hpp_free(struct perf_hpp_fmt *fmt) 1908 { 1909 free(fmt); 1910 } 1911 1912 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1913 int level) 1914 { 1915 struct perf_hpp_fmt *fmt; 1916 1917 fmt = memdup(hd->fmt, sizeof(*fmt)); 1918 if 
(fmt) { 1919 INIT_LIST_HEAD(&fmt->list); 1920 INIT_LIST_HEAD(&fmt->sort_list); 1921 fmt->free = hpp_free; 1922 fmt->level = level; 1923 } 1924 1925 return fmt; 1926 } 1927 1928 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1929 { 1930 struct perf_hpp_fmt *fmt; 1931 struct hpp_sort_entry *hse; 1932 int ret = -1; 1933 int r; 1934 1935 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1936 if (!perf_hpp__is_sort_entry(fmt)) 1937 continue; 1938 1939 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1940 if (hse->se->se_filter == NULL) 1941 continue; 1942 1943 /* 1944 * hist entry is filtered if any of sort key in the hpp list 1945 * is applied. But it should skip non-matched filter types. 1946 */ 1947 r = hse->se->se_filter(he, type, arg); 1948 if (r >= 0) { 1949 if (ret < 0) 1950 ret = 0; 1951 ret |= r; 1952 } 1953 } 1954 1955 return ret; 1956 } 1957 1958 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1959 struct perf_hpp_list *list, 1960 int level) 1961 { 1962 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1963 1964 if (hse == NULL) 1965 return -1; 1966 1967 perf_hpp_list__register_sort_field(list, &hse->hpp); 1968 return 0; 1969 } 1970 1971 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1972 struct perf_hpp_list *list) 1973 { 1974 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1975 1976 if (hse == NULL) 1977 return -1; 1978 1979 perf_hpp_list__column_register(list, &hse->hpp); 1980 return 0; 1981 } 1982 1983 struct hpp_dynamic_entry { 1984 struct perf_hpp_fmt hpp; 1985 struct evsel *evsel; 1986 struct tep_format_field *field; 1987 unsigned dynamic_len; 1988 bool raw_trace; 1989 }; 1990 1991 static int hde_width(struct hpp_dynamic_entry *hde) 1992 { 1993 if (!hde->hpp.len) { 1994 int len = hde->dynamic_len; 1995 int namelen = strlen(hde->field->name); 1996 int fieldlen = hde->field->size; 1997 1998 if (namelen > len) 1999 len = namelen; 2000 2001 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2002 /* length for print hex numbers */ 2003 fieldlen = hde->field->size * 2 + 2; 2004 } 2005 if (fieldlen > len) 2006 len = fieldlen; 2007 2008 hde->hpp.len = len; 2009 } 2010 return hde->hpp.len; 2011 } 2012 2013 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2014 struct hist_entry *he) 2015 { 2016 char *str, *pos; 2017 struct tep_format_field *field = hde->field; 2018 size_t namelen; 2019 bool last = false; 2020 2021 if (hde->raw_trace) 2022 return; 2023 2024 /* parse pretty print result and update max length */ 2025 if (!he->trace_output) 2026 he->trace_output = get_trace_output(he); 2027 2028 namelen = strlen(field->name); 2029 str = he->trace_output; 2030 2031 while (str) { 2032 pos = strchr(str, ' '); 2033 if (pos == NULL) { 2034 last = true; 2035 pos = str + strlen(str); 2036 } 2037 2038 if (!strncmp(str, field->name, namelen)) { 2039 size_t len; 2040 2041 str += namelen + 1; 2042 len = pos - str; 2043 2044 if (len > hde->dynamic_len) 2045 hde->dynamic_len = len; 2046 break; 2047 } 2048 2049 if (last) 2050 str = NULL; 2051 else 2052 str = pos + 1; 2053 } 2054 } 2055 2056 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2057 struct hists *hists __maybe_unused, 2058 int line __maybe_unused, 2059 int *span __maybe_unused) 2060 { 2061 struct hpp_dynamic_entry *hde; 2062 size_t len = fmt->user_len; 2063 2064 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2065 2066 if (!len) 2067 len = hde_width(hde); 2068 2069 return 
scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2070 } 2071 2072 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2073 struct perf_hpp *hpp __maybe_unused, 2074 struct hists *hists __maybe_unused) 2075 { 2076 struct hpp_dynamic_entry *hde; 2077 size_t len = fmt->user_len; 2078 2079 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2080 2081 if (!len) 2082 len = hde_width(hde); 2083 2084 return len; 2085 } 2086 2087 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2088 { 2089 struct hpp_dynamic_entry *hde; 2090 2091 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2092 2093 return hists_to_evsel(hists) == hde->evsel; 2094 } 2095 2096 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2097 struct hist_entry *he) 2098 { 2099 struct hpp_dynamic_entry *hde; 2100 size_t len = fmt->user_len; 2101 char *str, *pos; 2102 struct tep_format_field *field; 2103 size_t namelen; 2104 bool last = false; 2105 int ret; 2106 2107 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2108 2109 if (!len) 2110 len = hde_width(hde); 2111 2112 if (hde->raw_trace) 2113 goto raw_field; 2114 2115 if (!he->trace_output) 2116 he->trace_output = get_trace_output(he); 2117 2118 field = hde->field; 2119 namelen = strlen(field->name); 2120 str = he->trace_output; 2121 2122 while (str) { 2123 pos = strchr(str, ' '); 2124 if (pos == NULL) { 2125 last = true; 2126 pos = str + strlen(str); 2127 } 2128 2129 if (!strncmp(str, field->name, namelen)) { 2130 str += namelen + 1; 2131 str = strndup(str, pos - str); 2132 2133 if (str == NULL) 2134 return scnprintf(hpp->buf, hpp->size, 2135 "%*.*s", len, len, "ERROR"); 2136 break; 2137 } 2138 2139 if (last) 2140 str = NULL; 2141 else 2142 str = pos + 1; 2143 } 2144 2145 if (str == NULL) { 2146 struct trace_seq seq; 2147 raw_field: 2148 trace_seq_init(&seq); 2149 tep_print_field(&seq, he->raw_data, hde->field); 2150 str = seq.buffer; 2151 } 2152 2153 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2154 free(str); 2155 return ret; 2156 } 2157 2158 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2159 struct hist_entry *a, struct hist_entry *b) 2160 { 2161 struct hpp_dynamic_entry *hde; 2162 struct tep_format_field *field; 2163 unsigned offset, size; 2164 2165 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2166 2167 if (b == NULL) { 2168 update_dynamic_len(hde, a); 2169 return 0; 2170 } 2171 2172 field = hde->field; 2173 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2174 unsigned long long dyn; 2175 2176 tep_read_number_field(field, a->raw_data, &dyn); 2177 offset = dyn & 0xffff; 2178 size = (dyn >> 16) & 0xffff; 2179 2180 /* record max width for output */ 2181 if (size > hde->dynamic_len) 2182 hde->dynamic_len = size; 2183 } else { 2184 offset = field->offset; 2185 size = field->size; 2186 } 2187 2188 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2189 } 2190 2191 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2192 { 2193 return fmt->cmp == __sort__hde_cmp; 2194 } 2195 2196 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2197 { 2198 struct hpp_dynamic_entry *hde_a; 2199 struct hpp_dynamic_entry *hde_b; 2200 2201 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2202 return false; 2203 2204 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2205 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2206 2207 return hde_a->field == hde_b->field; 2208 } 2209 2210 static void 
hde_free(struct perf_hpp_fmt *fmt) 2211 { 2212 struct hpp_dynamic_entry *hde; 2213 2214 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2215 free(hde); 2216 } 2217 2218 static struct hpp_dynamic_entry * 2219 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2220 int level) 2221 { 2222 struct hpp_dynamic_entry *hde; 2223 2224 hde = malloc(sizeof(*hde)); 2225 if (hde == NULL) { 2226 pr_debug("Memory allocation failed\n"); 2227 return NULL; 2228 } 2229 2230 hde->evsel = evsel; 2231 hde->field = field; 2232 hde->dynamic_len = 0; 2233 2234 hde->hpp.name = field->name; 2235 hde->hpp.header = __sort__hde_header; 2236 hde->hpp.width = __sort__hde_width; 2237 hde->hpp.entry = __sort__hde_entry; 2238 hde->hpp.color = NULL; 2239 2240 hde->hpp.cmp = __sort__hde_cmp; 2241 hde->hpp.collapse = __sort__hde_cmp; 2242 hde->hpp.sort = __sort__hde_cmp; 2243 hde->hpp.equal = __sort__hde_equal; 2244 hde->hpp.free = hde_free; 2245 2246 INIT_LIST_HEAD(&hde->hpp.list); 2247 INIT_LIST_HEAD(&hde->hpp.sort_list); 2248 hde->hpp.elide = false; 2249 hde->hpp.len = 0; 2250 hde->hpp.user_len = 0; 2251 hde->hpp.level = level; 2252 2253 return hde; 2254 } 2255 2256 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2257 { 2258 struct perf_hpp_fmt *new_fmt = NULL; 2259 2260 if (perf_hpp__is_sort_entry(fmt)) { 2261 struct hpp_sort_entry *hse, *new_hse; 2262 2263 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2264 new_hse = memdup(hse, sizeof(*hse)); 2265 if (new_hse) 2266 new_fmt = &new_hse->hpp; 2267 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2268 struct hpp_dynamic_entry *hde, *new_hde; 2269 2270 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2271 new_hde = memdup(hde, sizeof(*hde)); 2272 if (new_hde) 2273 new_fmt = &new_hde->hpp; 2274 } else { 2275 new_fmt = memdup(fmt, sizeof(*fmt)); 2276 } 2277 2278 INIT_LIST_HEAD(&new_fmt->list); 2279 INIT_LIST_HEAD(&new_fmt->sort_list); 2280 2281 return new_fmt; 2282 } 2283 2284 static int parse_field_name(char *str, char **event, char **field, char **opt) 2285 { 2286 char *event_name, *field_name, *opt_name; 2287 2288 event_name = str; 2289 field_name = strchr(str, '.'); 2290 2291 if (field_name) { 2292 *field_name++ = '\0'; 2293 } else { 2294 event_name = NULL; 2295 field_name = str; 2296 } 2297 2298 opt_name = strchr(field_name, '/'); 2299 if (opt_name) 2300 *opt_name++ = '\0'; 2301 2302 *event = event_name; 2303 *field = field_name; 2304 *opt = opt_name; 2305 2306 return 0; 2307 } 2308 2309 /* find match evsel using a given event name. The event name can be: 2310 * 1. '%' + event index (e.g. '%1' for first event) 2311 * 2. full event name (e.g. sched:sched_switch) 2312 * 3. 
partial event name (should not contain ':') 2313 */ 2314 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 2315 { 2316 struct evsel *evsel = NULL; 2317 struct evsel *pos; 2318 bool full_name; 2319 2320 /* case 1 */ 2321 if (event_name[0] == '%') { 2322 int nr = strtol(event_name+1, NULL, 0); 2323 2324 if (nr > evlist->core.nr_entries) 2325 return NULL; 2326 2327 evsel = perf_evlist__first(evlist); 2328 while (--nr > 0) 2329 evsel = perf_evsel__next(evsel); 2330 2331 return evsel; 2332 } 2333 2334 full_name = !!strchr(event_name, ':'); 2335 evlist__for_each_entry(evlist, pos) { 2336 /* case 2 */ 2337 if (full_name && !strcmp(pos->name, event_name)) 2338 return pos; 2339 /* case 3 */ 2340 if (!full_name && strstr(pos->name, event_name)) { 2341 if (evsel) { 2342 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2343 event_name, evsel->name, pos->name); 2344 return NULL; 2345 } 2346 evsel = pos; 2347 } 2348 } 2349 2350 return evsel; 2351 } 2352 2353 static int __dynamic_dimension__add(struct evsel *evsel, 2354 struct tep_format_field *field, 2355 bool raw_trace, int level) 2356 { 2357 struct hpp_dynamic_entry *hde; 2358 2359 hde = __alloc_dynamic_entry(evsel, field, level); 2360 if (hde == NULL) 2361 return -ENOMEM; 2362 2363 hde->raw_trace = raw_trace; 2364 2365 perf_hpp__register_sort_field(&hde->hpp); 2366 return 0; 2367 } 2368 2369 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 2370 { 2371 int ret; 2372 struct tep_format_field *field; 2373 2374 field = evsel->tp_format->format.fields; 2375 while (field) { 2376 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2377 if (ret < 0) 2378 return ret; 2379 2380 field = field->next; 2381 } 2382 return 0; 2383 } 2384 2385 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 2386 int level) 2387 { 2388 int ret; 2389 struct evsel *evsel; 2390 2391 evlist__for_each_entry(evlist, evsel) { 2392 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2393 continue; 2394 2395 ret = add_evsel_fields(evsel, raw_trace, level); 2396 if (ret < 0) 2397 return ret; 2398 } 2399 return 0; 2400 } 2401 2402 static int add_all_matching_fields(struct evlist *evlist, 2403 char *field_name, bool raw_trace, int level) 2404 { 2405 int ret = -ESRCH; 2406 struct evsel *evsel; 2407 struct tep_format_field *field; 2408 2409 evlist__for_each_entry(evlist, evsel) { 2410 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2411 continue; 2412 2413 field = tep_find_any_field(evsel->tp_format, field_name); 2414 if (field == NULL) 2415 continue; 2416 2417 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2418 if (ret < 0) 2419 break; 2420 } 2421 return ret; 2422 } 2423 2424 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 2425 int level) 2426 { 2427 char *str, *event_name, *field_name, *opt_name; 2428 struct evsel *evsel; 2429 struct tep_format_field *field; 2430 bool raw_trace = symbol_conf.raw_trace; 2431 int ret = 0; 2432 2433 if (evlist == NULL) 2434 return -ENOENT; 2435 2436 str = strdup(tok); 2437 if (str == NULL) 2438 return -ENOMEM; 2439 2440 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 2441 ret = -EINVAL; 2442 goto out; 2443 } 2444 2445 if (opt_name) { 2446 if (strcmp(opt_name, "raw")) { 2447 pr_debug("unsupported field option %s\n", opt_name); 2448 ret = -EINVAL; 2449 goto out; 2450 } 2451 raw_trace = true; 2452 } 2453 2454 if (!strcmp(field_name, "trace_fields")) { 2455 ret = add_all_dynamic_fields(evlist, raw_trace, level); 
2456 goto out; 2457 } 2458 2459 if (event_name == NULL) { 2460 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 2461 goto out; 2462 } 2463 2464 evsel = find_evsel(evlist, event_name); 2465 if (evsel == NULL) { 2466 pr_debug("Cannot find event: %s\n", event_name); 2467 ret = -ENOENT; 2468 goto out; 2469 } 2470 2471 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2472 pr_debug("%s is not a tracepoint event\n", event_name); 2473 ret = -EINVAL; 2474 goto out; 2475 } 2476 2477 if (!strcmp(field_name, "*")) { 2478 ret = add_evsel_fields(evsel, raw_trace, level); 2479 } else { 2480 field = tep_find_any_field(evsel->tp_format, field_name); 2481 if (field == NULL) { 2482 pr_debug("Cannot find event field for %s.%s\n", 2483 event_name, field_name); 2484 return -ENOENT; 2485 } 2486 2487 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2488 } 2489 2490 out: 2491 free(str); 2492 return ret; 2493 } 2494 2495 static int __sort_dimension__add(struct sort_dimension *sd, 2496 struct perf_hpp_list *list, 2497 int level) 2498 { 2499 if (sd->taken) 2500 return 0; 2501 2502 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2503 return -1; 2504 2505 if (sd->entry->se_collapse) 2506 list->need_collapse = 1; 2507 2508 sd->taken = 1; 2509 2510 return 0; 2511 } 2512 2513 static int __hpp_dimension__add(struct hpp_dimension *hd, 2514 struct perf_hpp_list *list, 2515 int level) 2516 { 2517 struct perf_hpp_fmt *fmt; 2518 2519 if (hd->taken) 2520 return 0; 2521 2522 fmt = __hpp_dimension__alloc_hpp(hd, level); 2523 if (!fmt) 2524 return -1; 2525 2526 hd->taken = 1; 2527 perf_hpp_list__register_sort_field(list, fmt); 2528 return 0; 2529 } 2530 2531 static int __sort_dimension__add_output(struct perf_hpp_list *list, 2532 struct sort_dimension *sd) 2533 { 2534 if (sd->taken) 2535 return 0; 2536 2537 if (__sort_dimension__add_hpp_output(sd, list) < 0) 2538 return -1; 2539 2540 sd->taken = 1; 2541 return 0; 2542 } 2543 2544 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 2545 struct hpp_dimension *hd) 2546 { 2547 struct perf_hpp_fmt *fmt; 2548 2549 if (hd->taken) 2550 return 0; 2551 2552 fmt = __hpp_dimension__alloc_hpp(hd, 0); 2553 if (!fmt) 2554 return -1; 2555 2556 hd->taken = 1; 2557 perf_hpp_list__column_register(list, fmt); 2558 return 0; 2559 } 2560 2561 int hpp_dimension__add_output(unsigned col) 2562 { 2563 BUG_ON(col >= PERF_HPP__MAX_INDEX); 2564 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 2565 } 2566 2567 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 2568 struct evlist *evlist, 2569 int level) 2570 { 2571 unsigned int i; 2572 2573 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 2574 struct sort_dimension *sd = &common_sort_dimensions[i]; 2575 2576 if (strncasecmp(tok, sd->name, strlen(tok))) 2577 continue; 2578 2579 if (sd->entry == &sort_parent) { 2580 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 2581 if (ret) { 2582 char err[BUFSIZ]; 2583 2584 regerror(ret, &parent_regex, err, sizeof(err)); 2585 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 2586 return -EINVAL; 2587 } 2588 list->parent = 1; 2589 } else if (sd->entry == &sort_sym) { 2590 list->sym = 1; 2591 /* 2592 * perf diff displays the performance difference amongst 2593 * two or more perf.data files. Those files could come 2594 * from different binaries. So we should not compare 2595 * their ips, but the name of symbol. 
2596 */ 2597 if (sort__mode == SORT_MODE__DIFF) 2598 sd->entry->se_collapse = sort__sym_sort; 2599 2600 } else if (sd->entry == &sort_dso) { 2601 list->dso = 1; 2602 } else if (sd->entry == &sort_socket) { 2603 list->socket = 1; 2604 } else if (sd->entry == &sort_thread) { 2605 list->thread = 1; 2606 } else if (sd->entry == &sort_comm) { 2607 list->comm = 1; 2608 } 2609 2610 return __sort_dimension__add(sd, list, level); 2611 } 2612 2613 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 2614 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 2615 2616 if (strncasecmp(tok, hd->name, strlen(tok))) 2617 continue; 2618 2619 return __hpp_dimension__add(hd, list, level); 2620 } 2621 2622 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 2623 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 2624 2625 if (strncasecmp(tok, sd->name, strlen(tok))) 2626 continue; 2627 2628 if (sort__mode != SORT_MODE__BRANCH) 2629 return -EINVAL; 2630 2631 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 2632 list->sym = 1; 2633 2634 __sort_dimension__add(sd, list, level); 2635 return 0; 2636 } 2637 2638 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 2639 struct sort_dimension *sd = &memory_sort_dimensions[i]; 2640 2641 if (strncasecmp(tok, sd->name, strlen(tok))) 2642 continue; 2643 2644 if (sort__mode != SORT_MODE__MEMORY) 2645 return -EINVAL; 2646 2647 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 2648 return -EINVAL; 2649 2650 if (sd->entry == &sort_mem_daddr_sym) 2651 list->sym = 1; 2652 2653 __sort_dimension__add(sd, list, level); 2654 return 0; 2655 } 2656 2657 if (!add_dynamic_entry(evlist, tok, level)) 2658 return 0; 2659 2660 return -ESRCH; 2661 } 2662 2663 static int setup_sort_list(struct perf_hpp_list *list, char *str, 2664 struct evlist *evlist) 2665 { 2666 char *tmp, *tok; 2667 int ret = 0; 2668 int level = 0; 2669 int next_level = 1; 2670 bool in_group = false; 2671 2672 do { 2673 tok = str; 2674 tmp = strpbrk(str, "{}, "); 2675 if (tmp) { 2676 if (in_group) 2677 next_level = level; 2678 else 2679 next_level = level + 1; 2680 2681 if (*tmp == '{') 2682 in_group = true; 2683 else if (*tmp == '}') 2684 in_group = false; 2685 2686 *tmp = '\0'; 2687 str = tmp + 1; 2688 } 2689 2690 if (*tok) { 2691 ret = sort_dimension__add(list, tok, evlist, level); 2692 if (ret == -EINVAL) { 2693 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 2694 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 2695 else 2696 pr_err("Invalid --sort key: `%s'", tok); 2697 break; 2698 } else if (ret == -ESRCH) { 2699 pr_err("Unknown --sort key: `%s'", tok); 2700 break; 2701 } 2702 } 2703 2704 level = next_level; 2705 } while (tmp); 2706 2707 return ret; 2708 } 2709 2710 static const char *get_default_sort_order(struct evlist *evlist) 2711 { 2712 const char *default_sort_orders[] = { 2713 default_sort_order, 2714 default_branch_sort_order, 2715 default_mem_sort_order, 2716 default_top_sort_order, 2717 default_diff_sort_order, 2718 default_tracepoint_sort_order, 2719 }; 2720 bool use_trace = true; 2721 struct evsel *evsel; 2722 2723 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 2724 2725 if (evlist == NULL || perf_evlist__empty(evlist)) 2726 goto out_no_evlist; 2727 2728 evlist__for_each_entry(evlist, evsel) { 2729 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2730 use_trace = false; 2731 break; 2732 } 2733 } 2734 2735 if (use_trace) { 2736 sort__mode = 
SORT_MODE__TRACEPOINT; 2737 if (symbol_conf.raw_trace) 2738 return "trace_fields"; 2739 } 2740 out_no_evlist: 2741 return default_sort_orders[sort__mode]; 2742 } 2743 2744 static int setup_sort_order(struct evlist *evlist) 2745 { 2746 char *new_sort_order; 2747 2748 /* 2749 * Append '+'-prefixed sort order to the default sort 2750 * order string. 2751 */ 2752 if (!sort_order || is_strict_order(sort_order)) 2753 return 0; 2754 2755 if (sort_order[1] == '\0') { 2756 pr_err("Invalid --sort key: `+'"); 2757 return -EINVAL; 2758 } 2759 2760 /* 2761 * We allocate new sort_order string, but we never free it, 2762 * because it's checked over the rest of the code. 2763 */ 2764 if (asprintf(&new_sort_order, "%s,%s", 2765 get_default_sort_order(evlist), sort_order + 1) < 0) { 2766 pr_err("Not enough memory to set up --sort"); 2767 return -ENOMEM; 2768 } 2769 2770 sort_order = new_sort_order; 2771 return 0; 2772 } 2773 2774 /* 2775 * Adds 'pre,' prefix into 'str' is 'pre' is 2776 * not already part of 'str'. 2777 */ 2778 static char *prefix_if_not_in(const char *pre, char *str) 2779 { 2780 char *n; 2781 2782 if (!str || strstr(str, pre)) 2783 return str; 2784 2785 if (asprintf(&n, "%s,%s", pre, str) < 0) 2786 return NULL; 2787 2788 free(str); 2789 return n; 2790 } 2791 2792 static char *setup_overhead(char *keys) 2793 { 2794 if (sort__mode == SORT_MODE__DIFF) 2795 return keys; 2796 2797 keys = prefix_if_not_in("overhead", keys); 2798 2799 if (symbol_conf.cumulate_callchain) 2800 keys = prefix_if_not_in("overhead_children", keys); 2801 2802 return keys; 2803 } 2804 2805 static int __setup_sorting(struct evlist *evlist) 2806 { 2807 char *str; 2808 const char *sort_keys; 2809 int ret = 0; 2810 2811 ret = setup_sort_order(evlist); 2812 if (ret) 2813 return ret; 2814 2815 sort_keys = sort_order; 2816 if (sort_keys == NULL) { 2817 if (is_strict_order(field_order)) { 2818 /* 2819 * If user specified field order but no sort order, 2820 * we'll honor it and not add default sort orders. 2821 */ 2822 return 0; 2823 } 2824 2825 sort_keys = get_default_sort_order(evlist); 2826 } 2827 2828 str = strdup(sort_keys); 2829 if (str == NULL) { 2830 pr_err("Not enough memory to setup sort keys"); 2831 return -ENOMEM; 2832 } 2833 2834 /* 2835 * Prepend overhead fields for backward compatibility. 
2836 */ 2837 if (!is_strict_order(field_order)) { 2838 str = setup_overhead(str); 2839 if (str == NULL) { 2840 pr_err("Not enough memory to setup overhead keys"); 2841 return -ENOMEM; 2842 } 2843 } 2844 2845 ret = setup_sort_list(&perf_hpp_list, str, evlist); 2846 2847 free(str); 2848 return ret; 2849 } 2850 2851 void perf_hpp__set_elide(int idx, bool elide) 2852 { 2853 struct perf_hpp_fmt *fmt; 2854 struct hpp_sort_entry *hse; 2855 2856 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 2857 if (!perf_hpp__is_sort_entry(fmt)) 2858 continue; 2859 2860 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2861 if (hse->se->se_width_idx == idx) { 2862 fmt->elide = elide; 2863 break; 2864 } 2865 } 2866 } 2867 2868 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 2869 { 2870 if (list && strlist__nr_entries(list) == 1) { 2871 if (fp != NULL) 2872 fprintf(fp, "# %s: %s\n", list_name, 2873 strlist__entry(list, 0)->s); 2874 return true; 2875 } 2876 return false; 2877 } 2878 2879 static bool get_elide(int idx, FILE *output) 2880 { 2881 switch (idx) { 2882 case HISTC_SYMBOL: 2883 return __get_elide(symbol_conf.sym_list, "symbol", output); 2884 case HISTC_DSO: 2885 return __get_elide(symbol_conf.dso_list, "dso", output); 2886 case HISTC_COMM: 2887 return __get_elide(symbol_conf.comm_list, "comm", output); 2888 default: 2889 break; 2890 } 2891 2892 if (sort__mode != SORT_MODE__BRANCH) 2893 return false; 2894 2895 switch (idx) { 2896 case HISTC_SYMBOL_FROM: 2897 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 2898 case HISTC_SYMBOL_TO: 2899 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 2900 case HISTC_DSO_FROM: 2901 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 2902 case HISTC_DSO_TO: 2903 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 2904 default: 2905 break; 2906 } 2907 2908 return false; 2909 } 2910 2911 void sort__setup_elide(FILE *output) 2912 { 2913 struct perf_hpp_fmt *fmt; 2914 struct hpp_sort_entry *hse; 2915 2916 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 2917 if (!perf_hpp__is_sort_entry(fmt)) 2918 continue; 2919 2920 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2921 fmt->elide = get_elide(hse->se->se_width_idx, output); 2922 } 2923 2924 /* 2925 * It makes no sense to elide all of sort entries. 2926 * Just revert them to show up again. 
2927 */ 2928 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 2929 if (!perf_hpp__is_sort_entry(fmt)) 2930 continue; 2931 2932 if (!fmt->elide) 2933 return; 2934 } 2935 2936 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 2937 if (!perf_hpp__is_sort_entry(fmt)) 2938 continue; 2939 2940 fmt->elide = false; 2941 } 2942 } 2943 2944 int output_field_add(struct perf_hpp_list *list, char *tok) 2945 { 2946 unsigned int i; 2947 2948 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 2949 struct sort_dimension *sd = &common_sort_dimensions[i]; 2950 2951 if (strncasecmp(tok, sd->name, strlen(tok))) 2952 continue; 2953 2954 return __sort_dimension__add_output(list, sd); 2955 } 2956 2957 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 2958 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 2959 2960 if (strncasecmp(tok, hd->name, strlen(tok))) 2961 continue; 2962 2963 return __hpp_dimension__add_output(list, hd); 2964 } 2965 2966 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 2967 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 2968 2969 if (strncasecmp(tok, sd->name, strlen(tok))) 2970 continue; 2971 2972 return __sort_dimension__add_output(list, sd); 2973 } 2974 2975 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 2976 struct sort_dimension *sd = &memory_sort_dimensions[i]; 2977 2978 if (strncasecmp(tok, sd->name, strlen(tok))) 2979 continue; 2980 2981 return __sort_dimension__add_output(list, sd); 2982 } 2983 2984 return -ESRCH; 2985 } 2986 2987 static int setup_output_list(struct perf_hpp_list *list, char *str) 2988 { 2989 char *tmp, *tok; 2990 int ret = 0; 2991 2992 for (tok = strtok_r(str, ", ", &tmp); 2993 tok; tok = strtok_r(NULL, ", ", &tmp)) { 2994 ret = output_field_add(list, tok); 2995 if (ret == -EINVAL) { 2996 ui__error("Invalid --fields key: `%s'", tok); 2997 break; 2998 } else if (ret == -ESRCH) { 2999 ui__error("Unknown --fields key: `%s'", tok); 3000 break; 3001 } 3002 } 3003 3004 return ret; 3005 } 3006 3007 void reset_dimensions(void) 3008 { 3009 unsigned int i; 3010 3011 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3012 common_sort_dimensions[i].taken = 0; 3013 3014 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3015 hpp_sort_dimensions[i].taken = 0; 3016 3017 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3018 bstack_sort_dimensions[i].taken = 0; 3019 3020 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3021 memory_sort_dimensions[i].taken = 0; 3022 } 3023 3024 bool is_strict_order(const char *order) 3025 { 3026 return order && (*order != '+'); 3027 } 3028 3029 static int __setup_output_field(void) 3030 { 3031 char *str, *strp; 3032 int ret = -EINVAL; 3033 3034 if (field_order == NULL) 3035 return 0; 3036 3037 strp = str = strdup(field_order); 3038 if (str == NULL) { 3039 pr_err("Not enough memory to setup output fields"); 3040 return -ENOMEM; 3041 } 3042 3043 if (!is_strict_order(field_order)) 3044 strp++; 3045 3046 if (!strlen(strp)) { 3047 pr_err("Invalid --fields key: `+'"); 3048 goto out; 3049 } 3050 3051 ret = setup_output_list(&perf_hpp_list, strp); 3052 3053 out: 3054 free(str); 3055 return ret; 3056 } 3057 3058 int setup_sorting(struct evlist *evlist) 3059 { 3060 int err; 3061 3062 err = __setup_sorting(evlist); 3063 if (err < 0) 3064 return err; 3065 3066 if (parent_pattern != default_parent_pattern) { 3067 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3068 if (err < 0) 3069 return err; 3070 } 3071 3072 reset_dimensions(); 3073 3074 /* 3075 * perf diff 
doesn't use default hpp output fields. 3076 */ 3077 if (sort__mode != SORT_MODE__DIFF) 3078 perf_hpp__init(); 3079 3080 err = __setup_output_field(); 3081 if (err < 0) 3082 return err; 3083 3084 /* copy sort keys to output fields */ 3085 perf_hpp__setup_output_field(&perf_hpp_list); 3086 /* and then copy output fields to sort keys */ 3087 perf_hpp__append_sort_keys(&perf_hpp_list); 3088 3089 /* setup hists-specific output fields */ 3090 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3091 return -1; 3092 3093 return 0; 3094 } 3095 3096 void reset_output_field(void) 3097 { 3098 perf_hpp_list.need_collapse = 0; 3099 perf_hpp_list.parent = 0; 3100 perf_hpp_list.sym = 0; 3101 perf_hpp_list.dso = 0; 3102 3103 field_order = NULL; 3104 sort_order = NULL; 3105 3106 reset_dimensions(); 3107 perf_hpp__reset_output_field(&perf_hpp_list); 3108 } 3109 3110 #define INDENT (3*8 + 1) 3111 3112 static void add_key(struct strbuf *sb, const char *str, int *llen) 3113 { 3114 if (*llen >= 75) { 3115 strbuf_addstr(sb, "\n\t\t\t "); 3116 *llen = INDENT; 3117 } 3118 strbuf_addf(sb, " %s", str); 3119 *llen += strlen(str) + 1; 3120 } 3121 3122 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3123 int *llen) 3124 { 3125 int i; 3126 3127 for (i = 0; i < n; i++) 3128 add_key(sb, s[i].name, llen); 3129 } 3130 3131 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 3132 int *llen) 3133 { 3134 int i; 3135 3136 for (i = 0; i < n; i++) 3137 add_key(sb, s[i].name, llen); 3138 } 3139 3140 const char *sort_help(const char *prefix) 3141 { 3142 struct strbuf sb; 3143 char *s; 3144 int len = strlen(prefix) + INDENT; 3145 3146 strbuf_init(&sb, 300); 3147 strbuf_addstr(&sb, prefix); 3148 add_hpp_sort_string(&sb, hpp_sort_dimensions, 3149 ARRAY_SIZE(hpp_sort_dimensions), &len); 3150 add_sort_string(&sb, common_sort_dimensions, 3151 ARRAY_SIZE(common_sort_dimensions), &len); 3152 add_sort_string(&sb, bstack_sort_dimensions, 3153 ARRAY_SIZE(bstack_sort_dimensions), &len); 3154 add_sort_string(&sb, memory_sort_dimensions, 3155 ARRAY_SIZE(memory_sort_dimensions), &len); 3156 s = strbuf_detach(&sb, NULL); 3157 strbuf_release(&sb); 3158 return s; 3159 } 3160